diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -8670,6 +8670,14 @@
   return false;
 }
 
+/// getSVETypeSize - Return SVE vector or predicate register size.
+static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) {
+  assert(Ty->isVLSTBuiltinType() && "Invalid SVE Type");
+  return Ty->getKind() == BuiltinType::SveBool
+             ? Context.getLangOpts().ArmSveVectorBits / Context.getCharWidth()
+             : Context.getLangOpts().ArmSveVectorBits;
+}
+
 bool ASTContext::areCompatibleSveTypes(QualType FirstType,
                                        QualType SecondType) {
   assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) ||
@@ -8687,7 +8695,7 @@
         return VT->getElementType().getCanonicalType() ==
                FirstType->getSveEltType(*this);
       else if (VT->getVectorKind() == VectorType::GenericVector)
-        return getTypeSize(SecondType) == getLangOpts().ArmSveVectorBits &&
+        return getTypeSize(SecondType) == getSVETypeSize(*this, BT) &&
               hasSameType(VT->getElementType(),
                           getBuiltinVectorTypeInfo(BT).ElementType);
     }
@@ -8706,7 +8714,8 @@
          "Expected SVE builtin type and vector type!");
 
   auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
-    if (!FirstType->getAs<BuiltinType>())
+    const auto *BT = FirstType->getAs<BuiltinType>();
+    if (!BT)
       return false;
 
     const auto *VecTy = SecondType->getAs<VectorType>();
@@ -8716,13 +8725,19 @@
       const LangOptions::LaxVectorConversionKind LVCKind =
          getLangOpts().getLaxVectorConversions();
 
+      // Cannot convert between SVE predicates and SVE data vectors because
+      // they have different sizes.
+      if (BT->getKind() == BuiltinType::SveBool &&
+          VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector)
+        return false;
+
       // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion.
       // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly
       // converts to VLAT and VLAT implicitly converts to GNUT."
       // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and
       // predicates.
       if (VecTy->getVectorKind() == VectorType::GenericVector &&
-          getTypeSize(SecondType) != getLangOpts().ArmSveVectorBits)
+          getTypeSize(SecondType) != getSVETypeSize(*this, BT))
         return false;
 
       // If -flax-vector-conversions=all is specified, the types are
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -12569,15 +12569,13 @@
     checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral);
 
   // Strip vector types.
-  if (const auto *SourceVT = dyn_cast<VectorType>(Source)) {
-    if (Target->isVLSTBuiltinType()) {
-      auto SourceVectorKind = SourceVT->getVectorKind();
-      if (SourceVectorKind == VectorType::SveFixedLengthDataVector ||
-          SourceVectorKind == VectorType::SveFixedLengthPredicateVector ||
-          (SourceVectorKind == VectorType::GenericVector &&
-           S.Context.getTypeSize(Source) == S.getLangOpts().ArmSveVectorBits))
-        return;
-    }
+  if (isa<VectorType>(Source)) {
+    if (Target->isVLSTBuiltinType() &&
+        (S.Context.areCompatibleSveTypes(QualType(Target, 0),
+                                         QualType(Source, 0)) ||
+         S.Context.areLaxCompatibleSveTypes(QualType(Target, 0),
+                                            QualType(Source, 0))))
+      return;
 
     if (!isa<VectorType>(Target)) {
       if (S.SourceMgr.isInSystemMacro(CC))
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
@@ -7,6 +7,7 @@
 
 typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));
 typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
+typedef uint8_t uint8_vec_t __attribute__((vector_size(N / 64)));
 
 fixed_bool_t global_pred;
 fixed_int32_t global_vec;
@@ -115,26 +116,26 @@
 // CHECK-NEXT:    [[RETVAL:%.*]] = alloca <16 x i32>, align 16
 // CHECK-NEXT:    [[PRED_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
 // CHECK-NEXT:    [[VEC_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
-// CHECK-NEXT:    [[XX:%.*]] = alloca <16 x i32>, align 16
-// CHECK-NEXT:    [[YY:%.*]] = alloca <16 x i32>, align 16
+// CHECK-NEXT:    [[XX:%.*]] = alloca <8 x i8>, align 8
+// CHECK-NEXT:    [[YY:%.*]] = alloca <8 x i8>, align 8
 // CHECK-NEXT:    [[PG:%.*]] = alloca <vscale x 16 x i1>, align 2
 // CHECK-NEXT:    [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 8
-// CHECK-NEXT:    [[SAVED_VALUE1:%.*]] = alloca <16 x i32>, align 64
+// CHECK-NEXT:    [[SAVED_VALUE1:%.*]] = alloca <8 x i8>, align 8
 // CHECK-NEXT:    store <vscale x 16 x i1> [[PRED:%.*]], <vscale x 16 x i1>* [[PRED_ADDR]], align 2
 // CHECK-NEXT:    store <vscale x 4 x i32> [[VEC:%.*]], <vscale x 4 x i32>* [[VEC_ADDR]], align 16
-// CHECK-NEXT:    store <16 x i32> <i32 1, i32 2, i32 3, i32 4, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, <16 x i32>* [[XX]], align 16
-// CHECK-NEXT:    store <16 x i32> <i32 2, i32 5, i32 4, i32 6, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, <16 x i32>* [[YY]], align 16
+// CHECK-NEXT:    store <8 x i8> <i8 1, i8 2, i8 3, i8 4, i8 0, i8 0, i8 0, i8 0>, <8 x i8>* [[XX]], align 8
+// CHECK-NEXT:    store <8 x i8> <i8 2, i8 5, i8 4, i8 6, i8 0, i8 0, i8 0, i8 0>, <8 x i8>* [[YY]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[PRED_ADDR]], align 2
 // CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2
 // CHECK-NEXT:    store <8 x i8> [[TMP1]], <8 x i8>* [[SAVED_VALUE]], align 8
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
 // CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i32>, <16 x i32>* [[XX]], align 16
-// CHECK-NEXT:    [[TMP4:%.*]] = load <16 x i32>, <16 x i32>* [[YY]], align 16
-// CHECK-NEXT:    [[ADD:%.*]] = add <16 x i32> [[TMP3]], [[TMP4]]
-// CHECK-NEXT:    store <16 x i32> [[ADD]], <16 x i32>* [[SAVED_VALUE1]], align 64
-// CHECK-NEXT:    [[CASTFIXEDSVE2:%.*]] = bitcast <16 x i32>* [[SAVED_VALUE1]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE2]], align 64
+// CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* [[XX]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i8>, <8 x i8>* [[YY]], align 8
+// CHECK-NEXT:    [[ADD:%.*]] = add <8 x i8> [[TMP3]], [[TMP4]]
+// CHECK-NEXT:    store <8 x i8> [[ADD]], <8 x i8>* [[SAVED_VALUE1]], align 8
+// CHECK-NEXT:    [[CASTFIXEDSVE2:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE1]] to <vscale x 16 x i1>*
+// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE2]], align 8
 // CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.and.z.nxv16i1(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP5]])
 // CHECK-NEXT:    store <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1>* [[PG]], align 2
 // CHECK-NEXT:    [[TMP7:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[PG]], align 2
@@ -150,8 +151,8 @@
 // CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE4]]
 //
 fixed_int32_t test_cast(svbool_t pred, svint32_t vec) {
-  fixed_int32_t xx = {1, 2, 3, 4};
-  fixed_int32_t yy = {2, 5, 4, 6};
+  uint8_vec_t xx = {1, 2, 3, 4};
+  uint8_vec_t yy = {2, 5, 4, 6};
   svbool_t pg = svand_z(pred, global_pred, xx + yy);
   return svadd_m(pg, global_vec, vec);
 }
diff --git a/clang/test/SemaCXX/aarch64-sve-explicit-casts-fixed-size.cpp b/clang/test/SemaCXX/aarch64-sve-explicit-casts-fixed-size.cpp
--- a/clang/test/SemaCXX/aarch64-sve-explicit-casts-fixed-size.cpp
+++ b/clang/test/SemaCXX/aarch64-sve-explicit-casts-fixed-size.cpp
@@ -15,6 +15,7 @@
 typedef svfloat64_t fixed_float64_t FIXED_ATTR;
 typedef svint32_t fixed_int32_t FIXED_ATTR;
 typedef svint64_t fixed_int64_t FIXED_ATTR;
+typedef svbool_t fixed_bool_t FIXED_ATTR;
 
 // SVE VLSTs can be cast to SVE VLATs, regardless of lane size.
 // NOTE: the list below is NOT exhaustive for all SVE types.
@@ -47,3 +48,5 @@
 TESTCASE(fixed_int64_t, svfloat64_t)
 TESTCASE(fixed_int64_t, svint32_t)
 TESTCASE(fixed_int64_t, svint64_t)
+
+TESTCASE(fixed_bool_t, svbool_t)
diff --git a/clang/test/SemaCXX/aarch64-sve-lax-vector-conversions.cpp b/clang/test/SemaCXX/aarch64-sve-lax-vector-conversions.cpp
--- a/clang/test/SemaCXX/aarch64-sve-lax-vector-conversions.cpp
+++ b/clang/test/SemaCXX/aarch64-sve-lax-vector-conversions.cpp
@@ -2,22 +2,24 @@
 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -msve-vector-bits=512 -flax-vector-conversions=integer -fallow-half-arguments-and-returns -ffreestanding -fsyntax-only -verify=lax-vector-integer %s
 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -msve-vector-bits=512 -flax-vector-conversions=all -fallow-half-arguments-and-returns -ffreestanding -fsyntax-only -verify=lax-vector-all %s
 
-// lax-vector-all-no-diagnostics
-
 #include <arm_sve.h>
 
 #define N __ARM_FEATURE_SVE_BITS
 #define SVE_FIXED_ATTR __attribute__((arm_sve_vector_bits(N)))
 #define GNU_FIXED_ATTR __attribute__((vector_size(N / 8)))
+#define GNU_BOOL_FIXED_ATTR __attribute__((vector_size(N / 64)))
 
 typedef svfloat32_t sve_fixed_float32_t SVE_FIXED_ATTR;
 typedef svint32_t sve_fixed_int32_t SVE_FIXED_ATTR;
+typedef svbool_t sve_fixed_bool_t SVE_FIXED_ATTR;
 typedef float gnu_fixed_float32_t GNU_FIXED_ATTR;
 typedef int gnu_fixed_int32_t GNU_FIXED_ATTR;
+typedef int8_t gnu_fixed_bool_t GNU_BOOL_FIXED_ATTR;
 
 void sve_allowed_with_integer_lax_conversions() {
   sve_fixed_int32_t fi32;
   svint64_t si64;
+  svbool_t sb8;
 
   // The implicit cast here should fail if -flax-vector-conversions=none, but pass if
   // -flax-vector-conversions={integer,all}.
@@ -25,6 +27,15 @@
   // lax-vector-none-error@-1 {{assigning to 'sve_fixed_int32_t' (vector of 16 'int' values) from incompatible type}}
   si64 = fi32;
   // lax-vector-none-error@-1 {{assigning to 'svint64_t' (aka '__SVInt64_t') from incompatible type}}
+
+  fi32 = sb8;
+  // lax-vector-none-error@-1 {{assigning to 'sve_fixed_int32_t' (vector of 16 'int' values) from incompatible type}}
+  // lax-vector-integer-error@-2 {{assigning to 'sve_fixed_int32_t' (vector of 16 'int' values) from incompatible type}}
+  // lax-vector-all-error@-3 {{assigning to 'sve_fixed_int32_t' (vector of 16 'int' values) from incompatible type}}
+  sb8 = fi32;
+  // lax-vector-none-error@-1 {{assigning to 'svbool_t' (aka '__SVBool_t') from incompatible type}}
+  // lax-vector-integer-error@-2 {{assigning to 'svbool_t' (aka '__SVBool_t') from incompatible type}}
+  // lax-vector-all-error@-3 {{assigning to 'svbool_t' (aka '__SVBool_t') from incompatible type}}
 }
 
 void sve_allowed_with_all_lax_conversions() {
@@ -44,6 +55,7 @@
 void gnu_allowed_with_integer_lax_conversions() {
   gnu_fixed_int32_t fi32;
   svint64_t si64;
+  svbool_t sb8;
 
   // The implicit cast here should fail if -flax-vector-conversions=none, but pass if
   // -flax-vector-conversions={integer,all}.
@@ -51,6 +63,15 @@
   // lax-vector-none-error@-1 {{assigning to 'gnu_fixed_int32_t' (vector of 16 'int' values) from incompatible type}}
   si64 = fi32;
   // lax-vector-none-error@-1 {{assigning to 'svint64_t' (aka '__SVInt64_t') from incompatible type}}
+
+  fi32 = sb8;
+  // lax-vector-none-error@-1 {{assigning to 'gnu_fixed_int32_t' (vector of 16 'int' values) from incompatible type}}
+  // lax-vector-integer-error@-2 {{assigning to 'gnu_fixed_int32_t' (vector of 16 'int' values) from incompatible type}}
+  // lax-vector-all-error@-3 {{assigning to 'gnu_fixed_int32_t' (vector of 16 'int' values) from incompatible type}}
+  sb8 = fi32;
+  // lax-vector-none-error@-1 {{assigning to 'svbool_t' (aka '__SVBool_t') from incompatible type}}
+  // lax-vector-integer-error@-2 {{assigning to 'svbool_t' (aka '__SVBool_t') from incompatible type}}
+  // lax-vector-all-error@-3 {{assigning to 'svbool_t' (aka '__SVBool_t') from incompatible type}}
 }
 
 void gnu_allowed_with_all_lax_conversions() {
diff --git a/clang/test/SemaCXX/attr-arm-sve-vector-bits.cpp b/clang/test/SemaCXX/attr-arm-sve-vector-bits.cpp
--- a/clang/test/SemaCXX/attr-arm-sve-vector-bits.cpp
+++ b/clang/test/SemaCXX/attr-arm-sve-vector-bits.cpp
@@ -9,6 +9,10 @@
 typedef svint8_t fixed_int8_t __attribute__((arm_sve_vector_bits(N)));
 typedef int8_t gnu_int8_t __attribute__((vector_size(N / 8)));
 
+typedef __SVBool_t svbool_t;
+typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
+typedef int8_t gnu_bool_t __attribute__((vector_size(N / 64)));
+
 template <typename T> struct S { T var; };
 
 S<fixed_int8_t> s;
@@ -24,3 +28,11 @@
 // Test implicit casts between GNU and VLS vectors
 fixed_int8_t to_fixed_int8_t__from_gnu_int8_t(gnu_int8_t x) { return x; }
 gnu_int8_t from_fixed_int8_t__to_gnu_int8_t(fixed_int8_t x) { return x; }
+
+// Test implicit casts between VLA and VLS predicates
+svbool_t to_svbool_t(fixed_bool_t x) { return x; }
+fixed_bool_t from_svbool_t(svbool_t x) { return x; }
+
+// Test implicit casts between GNU and VLA predicates
+svbool_t to_svbool_t__from_gnu_bool_t(gnu_bool_t x) { return x; }
+gnu_bool_t from_svbool_t__to_gnu_bool_t(svbool_t x) { return x; }
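
For context, a minimal standalone sketch of the rule the new getSVETypeSize helper encodes and of the conversions this patch accepts and rejects. It is not part of the patch: the type and function names are illustrative, and it assumes compilation with -msve-vector-bits=512 so that __ARM_FEATURE_SVE_BITS is 512.

#include <arm_sve.h>

#define N __ARM_FEATURE_SVE_BITS

typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));
typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
typedef int8_t gnu_bool_t __attribute__((vector_size(N / 64)));

// An SVE predicate register holds one bit per byte of the data vector, so a
// fixed-length predicate occupies N / 8 bits, i.e. N / 64 bytes (8 bytes at
// N = 512). That is the size getSVETypeSize now returns for SveBool, and it
// is why the GNU counterpart of fixed_bool_t uses vector_size(N / 64).
_Static_assert(sizeof(fixed_bool_t) == N / 64, "predicate is N/64 bytes");
_Static_assert(sizeof(fixed_int32_t) == N / 8, "data vector is N/8 bytes");

// Accepted after this patch, independent of -flax-vector-conversions:
// VLA <-> VLS and VLA <-> GNU predicate conversions of matching size.
svbool_t pred_to_vla(fixed_bool_t x) { return x; }
gnu_bool_t pred_to_gnu(svbool_t x) { return x; }

// Rejected under every -flax-vector-conversions mode, because predicate and
// data registers have different sizes (N / 8 bits vs. N bits):
// fixed_int32_t bad(svbool_t x) { return x; } // error: incompatible type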