Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -4798,6 +4798,10 @@
   for (unsigned i = 0; i != NumArgs; ++i) {
     MVT ArgVT = Outs[i].VT;
+    if (!Outs[i].IsFixed && ArgVT.isScalableVector())
+      report_fatal_error("Passing SVE types to variadic functions is "
+                         "currently not supported");
+
     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
     CCAssignFn *AssignFn = CCAssignFnForCall(CallConv,
                                              /*IsVarArg=*/ !Outs[i].IsFixed);
Index: llvm/test/CodeGen/AArch64/sve-varargs-broken.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-varargs-broken.ll
@@ -0,0 +1,12 @@
+; RUN: not --crash llc -mtriple aarch64-linux-gnu -mattr=+sve <%s 2>&1 | FileCheck %s
+
+declare i32 @sve_printf(i8*, <vscale x 4 x i32>, ...)
+
+@.str_1 = internal constant [6 x i8] c"boo!\0A\00"
+
+; CHECK: Passing SVE types to variadic functions is currently not supported
+define void @foo(<vscale x 4 x i32> %x) {
+  %f = getelementptr [6 x i8], [6 x i8]* @.str_1, i64 0, i64 0
+  call i32 (i8*, <vscale x 4 x i32>, ...) @sve_printf(i8* %f, <vscale x 4 x i32> %x, <vscale x 4 x i32> %x)
+  ret void
+}
Index: llvm/test/CodeGen/AArch64/sve-varargs.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-varargs.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
+; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t
+
+; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
+; WARN-NOT: warning
+
+declare i32 @sve_printf(i8*, <vscale x 4 x i32>, ...)
+
+@.str_1 = internal constant [6 x i8] c"boo!\0A\00"
+
+define void @foo(<vscale x 4 x i32> %x) {
+; CHECK-LABEL: foo:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    adrp x0, .str_1
+; CHECK-NEXT:    add x0, x0, :lo12:.str_1
+; CHECK-NEXT:    bl sve_printf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %f = getelementptr [6 x i8], [6 x i8]* @.str_1, i64 0, i64 0
+  call i32 (i8*, <vscale x 4 x i32>, ...) @sve_printf(i8* %f, <vscale x 4 x i32> %x)
+  ret void
+}