Index: clang/lib/CodeGen/TargetInfo.cpp
===================================================================
--- clang/lib/CodeGen/TargetInfo.cpp
+++ clang/lib/CodeGen/TargetInfo.cpp
@@ -5480,6 +5480,11 @@
   Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                     QualType Ty) const override {
+    llvm::Type *BaseTy = CGF.ConvertType(Ty);
+    if (isa<llvm::ScalableVectorType>(BaseTy))
+      llvm::report_fatal_error("Passing SVE types to variadic functions is "
+                               "currently not supported");
+
     return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
                          : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
                                          : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
   }
Index: clang/test/CodeGen/aarch64-varargs-sve.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-varargs-sve.c
@@ -0,0 +1,21 @@
+// REQUIRES: aarch64-registered-target
+// RUN: not %clang_cc1 -triple aarch64-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -emit-llvm -o - %s 2>&1 | FileCheck %s
+// RUN: not %clang_cc1 -triple arm64-apple-ios7 -target-abi darwinpcs -target-feature +sve -fallow-half-arguments-and-returns -emit-llvm -o - %s 2>&1 | FileCheck %s
+
+// CHECK: Passing SVE types to variadic functions is currently not supported
+
+#include <arm_sve.h>
+#include <stdarg.h>
+
+double foo(char *str, ...) {
+  va_list ap;
+  svfloat64_t v;
+  double x;
+
+  va_start(ap, str);
+  v = va_arg(ap, svfloat64_t);
+  x = va_arg(ap, double);
+  va_end(ap);
+
+  return x + svaddv(svptrue_b8(), v);
+}
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -4798,6 +4798,10 @@
   for (unsigned i = 0; i != NumArgs; ++i) {
     MVT ArgVT = Outs[i].VT;
+    if (!Outs[i].IsFixed && ArgVT.isScalableVector())
+      report_fatal_error("Passing SVE types to variadic functions is "
+                         "currently not supported");
+
     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
     CCAssignFn *AssignFn = CCAssignFnForCall(CallConv,
                                              /*IsVarArg=*/ !Outs[i].IsFixed);
@@ -6597,6 +6601,10 @@
   Chain = VAList.getValue(1);
   VAList = DAG.getZExtOrTrunc(VAList, DL, PtrVT);
+  if (VT.isScalableVector())
+    report_fatal_error("Passing SVE types to variadic functions is "
+                       "currently not supported");
+
   if (Align && *Align > MinSlotSize) {
     VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                          DAG.getConstant(Align->value() - 1, DL, PtrVT));
Index: llvm/test/CodeGen/AArch64/sve-varargs-callee-broken.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-varargs-callee-broken.ll
@@ -0,0 +1,22 @@
+; RUN: not --crash llc -mtriple arm64-apple-ios7 -mattr=+sve < %s 2>&1 | FileCheck %s
+
+; CHECK: Passing SVE types to variadic functions is currently not supported
+
+@.str = private unnamed_addr constant [4 x i8] c"fmt\00", align 1
+define void @foo(i8* %fmt, ...) nounwind {
+entry:
+  %fmt.addr = alloca i8*, align 8
+  %args = alloca i8*, align 8
+  %vc = alloca i32, align 4
+  %vv = alloca <vscale x 4 x i32>, align 16
+  store i8* %fmt, i8** %fmt.addr, align 8
+  %args1 = bitcast i8** %args to i8*
+  call void @llvm.va_start(i8* %args1)
+  %0 = va_arg i8** %args, i32
+  store i32 %0, i32* %vc, align 4
+  %1 = va_arg i8** %args, <vscale x 4 x i32>
+  store <vscale x 4 x i32> %1, <vscale x 4 x i32>* %vv, align 16
+  ret void
+}
+
+declare void @llvm.va_start(i8*) nounwind
Index: llvm/test/CodeGen/AArch64/sve-varargs-caller-broken.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-varargs-caller-broken.ll
@@ -0,0 +1,12 @@
+; RUN: not --crash llc -mtriple aarch64-linux-gnu -mattr=+sve <%s 2>&1 | FileCheck %s
+
+declare i32 @sve_printf(i8*, <vscale x 4 x i32>, ...)
+
+@.str_1 = internal constant [6 x i8] c"boo!\0A\00"
+
+; CHECK: Passing SVE types to variadic functions is currently not supported
+define void @foo(<vscale x 4 x i32> %x) {
+  %f = getelementptr [6 x i8], [6 x i8]* @.str_1, i64 0, i64 0
+  call i32 (i8*, <vscale x 4 x i32>, ...) @sve_printf(i8* %f, <vscale x 4 x i32> %x, <vscale x 4 x i32> %x)
+  ret void
+}
Index: llvm/test/CodeGen/AArch64/sve-varargs.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/sve-varargs.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
+; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t
+
+; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
+; WARN-NOT: warning
+
+declare i32 @sve_printf(i8*, <vscale x 4 x i32>, ...)
+
+@.str_1 = internal constant [6 x i8] c"boo!\0A\00"
+
+define void @foo(<vscale x 4 x i32> %x) {
+; CHECK-LABEL: foo:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    adrp x0, .str_1
+; CHECK-NEXT:    add x0, x0, :lo12:.str_1
+; CHECK-NEXT:    bl sve_printf
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+  %f = getelementptr [6 x i8], [6 x i8]* @.str_1, i64 0, i64 0
+  call i32 (i8*, <vscale x 4 x i32>, ...) @sve_printf(i8* %f, <vscale x 4 x i32> %x)
+  ret void
+}