Index: clang/lib/Basic/Targets/AArch64.h
===================================================================
--- clang/lib/Basic/Targets/AArch64.h
+++ clang/lib/Basic/Targets/AArch64.h
@@ -87,6 +87,21 @@
   ArrayRef<const char *> getGCCRegNames() const override;
   ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
+
+  std::string convertConstraint(const char *&Constraint) const override {
+    std::string R;
+    switch (*Constraint) {
+    case 'U': // Three-character constraint; add "@3" hint for later parsing.
+      R = std::string("@3") + std::string(Constraint, 3);
+      Constraint += 2;
+      break;
+    default:
+      R = std::string(1, *Constraint);
+      break;
+    }
+    return R;
+  }
+
   bool validateAsmConstraint(const char *&Name,
                              TargetInfo::ConstraintInfo &Info) const override;
   bool
Index: clang/lib/Basic/Targets/AArch64.cpp
===================================================================
--- clang/lib/Basic/Targets/AArch64.cpp
+++ clang/lib/Basic/Targets/AArch64.cpp
@@ -486,17 +486,29 @@
     Info.setAllowsRegister();
     return true;
   case 'U':
+    if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
+      // SVE predicate registers ("Upa"=P0-P15, "Upl"=P0-P7)
+      Info.setAllowsRegister();
+      Name += 2;
+      return true;
+    }
     // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
     // Utf: A memory address suitable for ldp/stp in TF mode.
     // Usa: An absolute symbolic address.
     // Ush: The high part (bits 32:12) of a pc-relative symbolic address.
-    llvm_unreachable("FIXME: Unimplemented support for U* constraints.");
+
+    // Better to return an error saying that it's an unrecognised constraint,
+    // even though this is a valid constraint in gcc.
+    return false;
   case 'z': // Zero register, wzr or xzr
     Info.setAllowsRegister();
     return true;
   case 'x': // Floating point and SIMD registers (V0-V15)
     Info.setAllowsRegister();
     return true;
+  case 'y': // SVE registers (V0-V7)
+    Info.setAllowsRegister();
+    return true;
   }
   return false;
 }
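For reference (not part of the patch): the "@3" prefix produced by
convertConstraint is how a multi-character constraint code is spelled in the
LLVM IR constraint string, so the three letters of "Upa"/"Upl" survive into
the IR for the backend to match. A minimal sketch of the round trip (the
function name is illustrative):

  /* C source: copy an SVE predicate through the new "Upa" constraint. */
  __SVBool_t copy_pred(__SVBool_t in) {
    __SVBool_t out;
    asm("mov %0.b, %1.b" : "=Upa"(out) : "Upa"(in));
    return out;
  }

  /* Emitted IR constraint string (see the datatypes test below):
       "=@3Upa,@3Upa"
     where "@3" marks the next three characters as one constraint code. */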
Index: clang/lib/CodeGen/CGCall.cpp
===================================================================
--- clang/lib/CodeGen/CGCall.cpp
+++ clang/lib/CodeGen/CGCall.cpp
@@ -4497,8 +4497,9 @@
   // Update the largest vector width if any arguments have vector types.
   for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
     if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
-      LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
-                                    VT->getPrimitiveSizeInBits().getFixedSize());
+      LargestVectorWidth =
+          std::max((uint64_t)LargestVectorWidth,
+                   VT->getPrimitiveSizeInBits().getKnownMinSize());
   }

   // Compute the calling convention and attributes.
@@ -4612,8 +4613,9 @@

   // Update largest vector width from the return type.
   if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
-    LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
-                                  VT->getPrimitiveSizeInBits().getFixedSize());
+    LargestVectorWidth =
+        std::max((uint64_t)LargestVectorWidth,
+                 VT->getPrimitiveSizeInBits().getKnownMinSize());

   // Insert instrumentation or attach profile metadata at indirect call sites.
   // For more details, see the comment before the definition of
Index: clang/lib/CodeGen/CGStmt.cpp
===================================================================
--- clang/lib/CodeGen/CGStmt.cpp
+++ clang/lib/CodeGen/CGStmt.cpp
@@ -2095,8 +2095,9 @@
       // Update largest vector width for any vector types.
       if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
-        LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
-                                      VT->getPrimitiveSizeInBits().getFixedSize());
+        LargestVectorWidth =
+            std::max((uint64_t)LargestVectorWidth,
+                     VT->getPrimitiveSizeInBits().getKnownMinSize());
     } else {
       ArgTypes.push_back(Dest.getAddress(*this).getType());
       Args.push_back(Dest.getPointer(*this));
@@ -2120,8 +2121,9 @@
       // Update largest vector width for any vector types.
       if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
-        LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
-                                      VT->getPrimitiveSizeInBits().getFixedSize());
+        LargestVectorWidth =
+            std::max((uint64_t)LargestVectorWidth,
+                     VT->getPrimitiveSizeInBits().getKnownMinSize());
       if (Info.allowsRegister())
         InOutConstraints += llvm::utostr(i);
       else
@@ -2207,8 +2209,9 @@
     // Update largest vector width for any vector types.
     if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
-      LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
-                                    VT->getPrimitiveSizeInBits().getFixedSize());
+      LargestVectorWidth =
+          std::max((uint64_t)LargestVectorWidth,
+                   VT->getPrimitiveSizeInBits().getKnownMinSize());
     ArgTypes.push_back(Arg->getType());
     Args.push_back(Arg);
Index: clang/lib/CodeGen/CodeGenFunction.cpp
===================================================================
--- clang/lib/CodeGen/CodeGenFunction.cpp
+++ clang/lib/CodeGen/CodeGenFunction.cpp
@@ -495,13 +495,15 @@
   // Scan function arguments for vector width.
   for (llvm::Argument &A : CurFn->args())
     if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
-      LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
-                                    VT->getPrimitiveSizeInBits().getFixedSize());
+      LargestVectorWidth =
+          std::max((uint64_t)LargestVectorWidth,
+                   VT->getPrimitiveSizeInBits().getKnownMinSize());

   // Update vector width based on return type.
   if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
-    LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
-                                  VT->getPrimitiveSizeInBits().getFixedSize());
+    LargestVectorWidth =
+        std::max((uint64_t)LargestVectorWidth,
+                 VT->getPrimitiveSizeInBits().getKnownMinSize());

   // Add the required-vector-width attribute. This contains the max width from:
   // 1. min-vector-width attribute used in the source program.
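For context (not part of the patch): TypeSize::getFixedSize() asserts that the
queried size is not scalable, so it would assert on SVE types such as
<vscale x 16 x i8>, whereas getKnownMinSize() returns the minimum width (the
multiple of vscale), a safe lower bound for this bookkeeping. A minimal sketch
of the pattern, assuming the llvm::TypeSize API the patch itself uses; the
helper name is illustrative:

  #include "llvm/Support/TypeSize.h"
  #include <algorithm>
  #include <cstdint>

  // Illustrative helper (not in the patch): fold one type's width into a
  // running maximum the way the hunks above do.
  static uint64_t foldVectorWidth(uint64_t Largest, llvm::TypeSize TS) {
    // Fixed vector: the exact bit width. Scalable vector: the known minimum
    // width, e.g. 128 for <vscale x 16 x i8>.
    return std::max(Largest, (uint64_t)TS.getKnownMinSize());
  }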
Index: clang/test/CodeGen/aarch64-sve-inline-asm-crash.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-inline-asm-crash.c
@@ -0,0 +1,24 @@
+// REQUIRES: aarch64-registered-target
+
+// RUN: not %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns \
+// RUN: -target-feature +neon -S -O1 -o - %s 2>&1 | FileCheck %s
+
+// Set a vector constraint for an SVE predicate register.
+// Because the wrong constraint is used for an __SVBool_t,
+// the compiler will try to extend the nxv16i1 to an nxv16i8.
+// TODO: We don't have patterns for this yet, but once they are added this
+// test should be updated to check for an assembler error.
+__SVBool_t funcB1(__SVBool_t in)
+{
+  __SVBool_t ret;
+  asm volatile (
+    "mov %[ret].b, %[in].b \n"
+    : [ret] "=w" (ret)
+    : [in] "w" (in)
+    :);
+
+  return ret;
+}
+
+// CHECK: funcB1
+// CHECK-ERROR: fatal error: error in backend: Cannot select
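For contrast (illustrative only, not part of the patch): with the predicate
constraint this patch adds, the operand stays an nxv16i1 and no extension is
attempted; SVBOOL_TEST in the datatypes file below exercises the same idea
with zip1.

  /* The same copy as funcB1, written with the correct "Upa" constraint. */
  __SVBool_t funcB1_fixed(__SVBool_t in) {
    __SVBool_t ret;
    asm volatile("mov %[ret].b, %[in].b"
                 : [ret] "=Upa"(ret)
                 : [in] "Upa"(in));
    return ret;
  }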
Index: clang/test/CodeGen/aarch64-sve-inline-asm-datatypes.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve-inline-asm-datatypes.c
@@ -0,0 +1,252 @@
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns \
+// RUN: -target-feature +neon -S -O1 -o - -emit-llvm %s | FileCheck %s
+
+// Tests to check that all SVE data types can be passed in as input operands
+// and passed out as output operands.
+
+#define SVINT_TEST(DT, KIND)\
+DT func_int_##DT##KIND(DT in)\
+{\
+  DT out;\
+  asm volatile (\
+    "ptrue p0.b\n"\
+    "mov %[out]." #KIND ", p0/m, %[in]." #KIND "\n"\
+    : [out] "=w" (out)\
+    : [in] "w" (in)\
+    : "p0"\
+    );\
+  return out;\
+}
+
+SVINT_TEST(__SVUint8_t,b);
+// CHECK: call <vscale x 16 x i8> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 16 x i8> %in)
+SVINT_TEST(__SVUint8_t,h);
+// CHECK: call <vscale x 16 x i8> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 16 x i8> %in)
+SVINT_TEST(__SVUint8_t,s);
+// CHECK: call <vscale x 16 x i8> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 16 x i8> %in)
+SVINT_TEST(__SVUint8_t,d);
+// CHECK: call <vscale x 16 x i8> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 16 x i8> %in)
+
+SVINT_TEST(__SVUint16_t,b);
+// CHECK: call <vscale x 8 x i16> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 8 x i16> %in)
+SVINT_TEST(__SVUint16_t,h);
+// CHECK: call <vscale x 8 x i16> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 8 x i16> %in)
+SVINT_TEST(__SVUint16_t,s);
+// CHECK: call <vscale x 8 x i16> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 8 x i16> %in)
+SVINT_TEST(__SVUint16_t,d);
+// CHECK: call <vscale x 8 x i16> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 8 x i16> %in)
+
+SVINT_TEST(__SVUint32_t,b);
+// CHECK: call <vscale x 4 x i32> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 4 x i32> %in)
+SVINT_TEST(__SVUint32_t,h);
+// CHECK: call <vscale x 4 x i32> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 4 x i32> %in)
+SVINT_TEST(__SVUint32_t,s);
+// CHECK: call <vscale x 4 x i32> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 4 x i32> %in)
+SVINT_TEST(__SVUint32_t,d);
+// CHECK: call <vscale x 4 x i32> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 4 x i32> %in)
+
+SVINT_TEST(__SVUint64_t,b);
+// CHECK: call <vscale x 2 x i64> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 2 x i64> %in)
+SVINT_TEST(__SVUint64_t,h);
+// CHECK: call <vscale x 2 x i64> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 2 x i64> %in)
+SVINT_TEST(__SVUint64_t,s);
+// CHECK: call <vscale x 2 x i64> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 2 x i64> %in)
+SVINT_TEST(__SVUint64_t,d);
+// CHECK: call <vscale x 2 x i64> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 2 x i64> %in)
+
+SVINT_TEST(__SVInt8_t,b);
+// CHECK: call <vscale x 16 x i8> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 16 x i8> %in)
+SVINT_TEST(__SVInt8_t,h);
+// CHECK: call <vscale x 16 x i8> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 16 x i8> %in)
+SVINT_TEST(__SVInt8_t,s);
+// CHECK: call <vscale x 16 x i8> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 16 x i8> %in)
+SVINT_TEST(__SVInt8_t,d);
+// CHECK: call <vscale x 16 x i8> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 16 x i8> %in)
+
+SVINT_TEST(__SVInt16_t,b);
+// CHECK: call <vscale x 8 x i16> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 8 x i16> %in)
+SVINT_TEST(__SVInt16_t,h);
+// CHECK: call <vscale x 8 x i16> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 8 x i16> %in)
+SVINT_TEST(__SVInt16_t,s);
+// CHECK: call <vscale x 8 x i16> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 8 x i16> %in)
+SVINT_TEST(__SVInt16_t,d);
+// CHECK: call <vscale x 8 x i16> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 8 x i16> %in)
+
+SVINT_TEST(__SVInt32_t,b);
+// CHECK: call <vscale x 4 x i32> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 4 x i32> %in)
+SVINT_TEST(__SVInt32_t,h);
+// CHECK: call <vscale x 4 x i32> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 4 x i32> %in)
+SVINT_TEST(__SVInt32_t,s);
+// CHECK: call <vscale x 4 x i32> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 4 x i32> %in)
+SVINT_TEST(__SVInt32_t,d);
+// CHECK: call <vscale x 4 x i32> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 4 x i32> %in)
+
+SVINT_TEST(__SVInt64_t,b);
+// CHECK: call <vscale x 2 x i64> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 2 x i64> %in)
+SVINT_TEST(__SVInt64_t,h);
+// CHECK: call <vscale x 2 x i64> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 2 x i64> %in)
+SVINT_TEST(__SVInt64_t,s);
+// CHECK: call <vscale x 2 x i64> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 2 x i64> %in)
+SVINT_TEST(__SVInt64_t,d);
+// CHECK: call <vscale x 2 x i64> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 2 x i64> %in)
+
+
+// Test that floats can also be used as data types for integer instructions,
+// and check all the variants which would not be possible with a float
+// instruction.
+SVINT_TEST(__SVFloat16_t,b);
+// CHECK: call <vscale x 8 x half> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 8 x half> %in)
+SVINT_TEST(__SVFloat16_t,h);
+// CHECK: call <vscale x 8 x half> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 8 x half> %in)
+SVINT_TEST(__SVFloat16_t,s);
+// CHECK: call <vscale x 8 x half> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 8 x half> %in)
+SVINT_TEST(__SVFloat16_t,d);
+// CHECK: call <vscale x 8 x half> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 8 x half> %in)
+
+SVINT_TEST(__SVFloat32_t,b);
+// CHECK: call <vscale x 4 x float> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 4 x float> %in)
+SVINT_TEST(__SVFloat32_t,h);
+// CHECK: call <vscale x 4 x float> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 4 x float> %in)
+SVINT_TEST(__SVFloat32_t,s);
+// CHECK: call <vscale x 4 x float> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 4 x float> %in)
+SVINT_TEST(__SVFloat32_t,d);
+// CHECK: call <vscale x 4 x float> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 4 x float> %in)
+
+SVINT_TEST(__SVFloat64_t,b);
+// CHECK: call <vscale x 2 x double> asm sideeffect "ptrue p0.b\0Amov $0.b, p0/m, $1.b\0A", "=w,w,~{p0}"(<vscale x 2 x double> %in)
+SVINT_TEST(__SVFloat64_t,h);
+// CHECK: call <vscale x 2 x double> asm sideeffect "ptrue p0.b\0Amov $0.h, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 2 x double> %in)
+SVINT_TEST(__SVFloat64_t,s);
+// CHECK: call <vscale x 2 x double> asm sideeffect "ptrue p0.b\0Amov $0.s, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 2 x double> %in)
+SVINT_TEST(__SVFloat64_t,d);
+// CHECK: call <vscale x 2 x double> asm sideeffect "ptrue p0.b\0Amov $0.d, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 2 x double> %in)
+
+
+#define SVBOOL_TEST(KIND)\
+__SVBool_t func_bool_##KIND(__SVBool_t in1, __SVBool_t in2)\
+{\
+  __SVBool_t out;\
+  asm volatile (\
+    "zip1 %[out]." #KIND ", %[in1]." #KIND ", %[in2]." #KIND "\n"\
+    : [out] "=Upa" (out)\
+    : [in1] "Upa" (in1),\
+      [in2] "Upa" (in2)\
+    :);\
+  return out;\
+}
+
+SVBOOL_TEST(b);
+// CHECK: call <vscale x 16 x i1> asm sideeffect "zip1 $0.b, $1.b, $2.b\0A", "=@3Upa,@3Upa,@3Upa"(<vscale x 16 x i1> %in1, <vscale x 16 x i1> %in2)
+SVBOOL_TEST(h);
+// CHECK: call <vscale x 16 x i1> asm sideeffect "zip1 $0.h, $1.h, $2.h\0A", "=@3Upa,@3Upa,@3Upa"(<vscale x 16 x i1> %in1, <vscale x 16 x i1> %in2)
+SVBOOL_TEST(s);
+// CHECK: call <vscale x 16 x i1> asm sideeffect "zip1 $0.s, $1.s, $2.s\0A", "=@3Upa,@3Upa,@3Upa"(<vscale x 16 x i1> %in1, <vscale x 16 x i1> %in2)
+SVBOOL_TEST(d);
+// CHECK: call <vscale x 16 x i1> asm sideeffect "zip1 $0.d, $1.d, $2.d\0A", "=@3Upa,@3Upa,@3Upa"(<vscale x 16 x i1> %in1, <vscale x 16 x i1> %in2)
+
+
+#define SVBOOL_TEST_UPL(DT, KIND)\
+__SVBool_t func_bool_upl_##KIND(__SVBool_t in1, DT in2, DT in3)\
+{\
+  __SVBool_t out;\
+  asm volatile (\
+    "fadd %[out]." #KIND ", %[in1]." #KIND ", %[in2]." #KIND ", %[in3]." #KIND "\n"\
+    : [out] "=w" (out)\
+    : [in1] "Upl" (in1),\
+      [in2] "w" (in2),\
+      [in3] "w" (in3)\
+    :);\
+  return out;\
+}
+
+SVBOOL_TEST_UPL(__SVInt8_t, b);
+// CHECK: call <vscale x 16 x i1> asm sideeffect "fadd $0.b, $1.b, $2.b, $3.b\0A", "=w,@3Upl,w,w"(<vscale x 16 x i1> %in1, <vscale x 16 x i8> %in2, <vscale x 16 x i8> %in3)
+SVBOOL_TEST_UPL(__SVInt16_t, h);
+// CHECK: call <vscale x 16 x i1> asm sideeffect "fadd $0.h, $1.h, $2.h, $3.h\0A", "=w,@3Upl,w,w"(<vscale x 16 x i1> %in1, <vscale x 8 x i16> %in2, <vscale x 8 x i16> %in3)
+SVBOOL_TEST_UPL(__SVInt32_t, s);
+// CHECK: call <vscale x 16 x i1> asm sideeffect "fadd $0.s, $1.s, $2.s, $3.s\0A", "=w,@3Upl,w,w"(<vscale x 16 x i1> %in1, <vscale x 4 x i32> %in2, <vscale x 4 x i32> %in3)
+SVBOOL_TEST_UPL(__SVInt64_t, d);
+// CHECK: call <vscale x 16 x i1> asm sideeffect "fadd $0.d, $1.d, $2.d, $3.d\0A", "=w,@3Upl,w,w"(<vscale x 16 x i1> %in1, <vscale x 2 x i64> %in2, <vscale x 2 x i64> %in3)
+
+#define SVFLOAT_TEST(DT,KIND)\
+DT func_float_##DT##KIND(DT inout1, DT in2)\
+{\
+  asm volatile (\
+    "ptrue p0." #KIND ", #1 \n"\
+    "fsub %[inout1]." #KIND ", p0/m, %[inout1]." #KIND ", %[in2]." #KIND "\n"\
+    : [inout1] "=w" (inout1)\
+    : "[inout1]" (inout1),\
+      [in2] "w" (in2)\
+    : "p0");\
+  return inout1;\
+}
+
+SVFLOAT_TEST(__SVFloat16_t,s);
+// CHECK: call <vscale x 8 x half> asm sideeffect "ptrue p0.s, #1 \0Afsub $0.s, p0/m, $0.s, $2.s\0A", "=w,0,w,~{p0}"(<vscale x 8 x half> %inout1, <vscale x 8 x half> %in2)
+SVFLOAT_TEST(__SVFloat16_t,d);
+// CHECK: call <vscale x 8 x half> asm sideeffect "ptrue p0.d, #1 \0Afsub $0.d, p0/m, $0.d, $2.d\0A", "=w,0,w,~{p0}"(<vscale x 8 x half> %inout1, <vscale x 8 x half> %in2)
+
+SVFLOAT_TEST(__SVFloat32_t,s);
+// CHECK: call <vscale x 4 x float> asm sideeffect "ptrue p0.s, #1 \0Afsub $0.s, p0/m, $0.s, $2.s\0A", "=w,0,w,~{p0}"(<vscale x 4 x float> %inout1, <vscale x 4 x float> %in2)
+SVFLOAT_TEST(__SVFloat32_t,d);
+// CHECK: call <vscale x 4 x float> asm sideeffect "ptrue p0.d, #1 \0Afsub $0.d, p0/m, $0.d, $2.d\0A", "=w,0,w,~{p0}"(<vscale x 4 x float> %inout1, <vscale x 4 x float> %in2)
+
+SVFLOAT_TEST(__SVFloat64_t,s);
+// CHECK: call <vscale x 2 x double> asm sideeffect "ptrue p0.s, #1 \0Afsub $0.s, p0/m, $0.s, $2.s\0A", "=w,0,w,~{p0}"(<vscale x 2 x double> %inout1, <vscale x 2 x double> %in2)
+SVFLOAT_TEST(__SVFloat64_t,d);
+// CHECK: call <vscale x 2 x double> asm sideeffect "ptrue p0.d, #1 \0Afsub $0.d, p0/m, $0.d, $2.d\0A", "=w,0,w,~{p0}"(<vscale x 2 x double> %inout1, <vscale x 2 x double> %in2)
+
+#define SVFLOAT_TEST_Y(DT, KIND)\
+__SVBool_t func_float_y_##KIND(DT in1, DT in2)\
+{\
+  __SVBool_t out;\
+  asm volatile (\
+    "fmul %[out]." #KIND ", %[in1]." #KIND ", %[in2]." #KIND "\n"\
+    : [out] "=w" (out)\
+    : [in1] "w" (in1),\
+      [in2] "y" (in2)\
+    :);\
+  return out;\
+}
+
+SVFLOAT_TEST_Y(__SVFloat16_t,h);
+// CHECK: call <vscale x 16 x i1> asm sideeffect "fmul $0.h, $1.h, $2.h\0A", "=w,w,y"(<vscale x 8 x half> %in1, <vscale x 8 x half> %in2)
+SVFLOAT_TEST_Y(__SVFloat32_t,s);
+// CHECK: call <vscale x 16 x i1> asm sideeffect "fmul $0.s, $1.s, $2.s\0A", "=w,w,y"(<vscale x 4 x float> %in1, <vscale x 4 x float> %in2)
+SVFLOAT_TEST_Y(__SVFloat64_t,d);
+// CHECK: call <vscale x 16 x i1> asm sideeffect "fmul $0.d, $1.d, $2.d\0A", "=w,w,y"(<vscale x 2 x double> %in1, <vscale x 2 x double> %in2)
+
+
+// Another test for floats, this time including the h suffix.
+
+#define SVFLOAT_CVT_TEST(DT1,KIND1,DT2,KIND2)\
+DT1 func_float_cvt_##DT1##KIND1##DT2##KIND2(DT2 in1)\
+{\
+  DT1 out1;\
+  asm volatile (\
+    "ptrue p0." #KIND2 ", #1 \n"\
+    "fcvt %[out1]." #KIND1 ", p0/m, %[in1]." #KIND2 "\n"\
+    : [out1] "=w" (out1)\
+    : [in1] "w" (in1)\
+    : "p0");\
+  return out1;\
+}
+
+SVFLOAT_CVT_TEST(__SVFloat64_t,d,__SVFloat32_t,s);
+// CHECK: call <vscale x 2 x double> asm sideeffect "ptrue p0.s, #1 \0Afcvt $0.d, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 4 x float> %in1)
+SVFLOAT_CVT_TEST(__SVFloat64_t,d,__SVFloat16_t,h);
+// CHECK: call <vscale x 2 x double> asm sideeffect "ptrue p0.h, #1 \0Afcvt $0.d, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 8 x half> %in1)
+SVFLOAT_CVT_TEST(__SVFloat32_t,s,__SVFloat16_t,h);
+// CHECK: call <vscale x 4 x float> asm sideeffect "ptrue p0.h, #1 \0Afcvt $0.s, p0/m, $1.h\0A", "=w,w,~{p0}"(<vscale x 8 x half> %in1)
+SVFLOAT_CVT_TEST(__SVFloat32_t,s,__SVFloat64_t,d);
+// CHECK: call <vscale x 4 x float> asm sideeffect "ptrue p0.d, #1 \0Afcvt $0.s, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 2 x double> %in1)
+SVFLOAT_CVT_TEST(__SVFloat16_t,h,__SVFloat64_t,d);
+// CHECK: call <vscale x 8 x half> asm sideeffect "ptrue p0.d, #1 \0Afcvt $0.h, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 2 x double> %in1)
+SVFLOAT_CVT_TEST(__SVFloat16_t,h,__SVFloat32_t,s);
+// CHECK: call <vscale x 8 x half> asm sideeffect "ptrue p0.s, #1 \0Afcvt $0.h, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 4 x float> %in1)
+
+// Test a mix of floats and ints.
+SVFLOAT_CVT_TEST(__SVInt16_t,h,__SVFloat32_t,s);
+// CHECK: call <vscale x 8 x i16> asm sideeffect "ptrue p0.s, #1 \0Afcvt $0.h, p0/m, $1.s\0A", "=w,w,~{p0}"(<vscale x 4 x float> %in1)
+SVFLOAT_CVT_TEST(__SVFloat16_t,s,__SVUint32_t,d);
+// CHECK: call <vscale x 8 x half> asm sideeffect "ptrue p0.d, #1 \0Afcvt $0.s, p0/m, $1.d\0A", "=w,w,~{p0}"(<vscale x 4 x i32> %in1)
#KIND "\n"\ + : [out] "=w" (out)\ + : [in1] "w" (in1),\ + [in2] "y" (in2)\ + :);\ + return out;\ +} + +SVFLOAT_TEST_Y(__SVFloat16_t,h); +// CHECK: call asm sideeffect "fmul $0.h, $1.h, $2.h\0A", "=w,w,y"( %in1, %in2) +SVFLOAT_TEST_Y(__SVFloat32_t,s); +// CHECK: call asm sideeffect "fmul $0.s, $1.s, $2.s\0A", "=w,w,y"( %in1, %in2) +SVFLOAT_TEST_Y(__SVFloat64_t,d); +// CHECK: call asm sideeffect "fmul $0.d, $1.d, $2.d\0A", "=w,w,y"( %in1, %in2) + + +// Another test for floats to include h suffix + +#define SVFLOAT_CVT_TEST(DT1,KIND1,DT2,KIND2)\ +DT1 func_float_cvt_##DT1##KIND1##DT2##KIND2(DT2 in1)\ +{\ + DT1 out1 ;\ + asm volatile (\ + "ptrue p0." #KIND2 ", #1 \n"\ + "fcvt %[out1]." #KIND1 ", p0/m, %[in1]." #KIND2 "\n"\ + : [out1] "=w" (out1)\ + : [in1] "w" (in1)\ + : "p0");\ + return out1 ;\ +}\ + +SVFLOAT_CVT_TEST(__SVFloat64_t,d,__SVFloat32_t,s); +// CHECK: call asm sideeffect "ptrue p0.s, #1 \0Afcvt $0.d, p0/m, $1.s\0A", "=w,w,~{p0}"( %in1) +SVFLOAT_CVT_TEST(__SVFloat64_t,d,__SVFloat16_t,h); +// CHECK: call asm sideeffect "ptrue p0.h, #1 \0Afcvt $0.d, p0/m, $1.h\0A", "=w,w,~{p0}"( %in1) +SVFLOAT_CVT_TEST(__SVFloat32_t,s,__SVFloat16_t,h); +// CHECK: call asm sideeffect "ptrue p0.h, #1 \0Afcvt $0.s, p0/m, $1.h\0A", "=w,w,~{p0}"( %in1) +SVFLOAT_CVT_TEST(__SVFloat32_t,s,__SVFloat64_t,d); +// CHECK: call asm sideeffect "ptrue p0.d, #1 \0Afcvt $0.s, p0/m, $1.d\0A", "=w,w,~{p0}"( %in1) +SVFLOAT_CVT_TEST(__SVFloat16_t,h,__SVFloat64_t,d); +// CHECK: call asm sideeffect "ptrue p0.d, #1 \0Afcvt $0.h, p0/m, $1.d\0A", "=w,w,~{p0}"( %in1) +SVFLOAT_CVT_TEST(__SVFloat16_t,h,__SVFloat32_t,s); +// CHECK: call asm sideeffect "ptrue p0.s, #1 \0Afcvt $0.h, p0/m, $1.s\0A", "=w,w,~{p0}"( %in1) + +//Test a mix of float and ints +SVFLOAT_CVT_TEST(__SVInt16_t,h,__SVFloat32_t,s); +// CHECK: call asm sideeffect "ptrue p0.s, #1 \0Afcvt $0.h, p0/m, $1.s\0A", "=w,w,~{p0}"( %in1) +SVFLOAT_CVT_TEST(__SVFloat16_t,s,__SVUint32_t,d); +// CHECK: call asm sideeffect "ptrue p0.d, #1 \0Afcvt $0.s, p0/m, $1.d\0A", "=w,w,~{p0}"( %in1) Index: clang/test/CodeGen/aarch64-sve-inline-asm-negative-test.c =================================================================== --- /dev/null +++ clang/test/CodeGen/aarch64-sve-inline-asm-negative-test.c @@ -0,0 +1,21 @@ +// REQUIRES: aarch64-registered-target + +// RUN: not %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns \ +// RUN: -target-feature +neon -S -O1 -o - %s | FileCheck %s + +// Assembler error +// Output constraint : Set a vector constraint on an integer +__SVFloat32_t funcB2() +{ + __SVFloat32_t ret ; + asm volatile ( + "fmov %[ret], wzr \n" + : [ret] "=w" (ret) + : + :); + + return ret ; +} + +// CHECK: funcB2 +// CHECK-ERROR: error: invalid operand for instruction