diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -2688,6 +2688,27 @@
         break;
       }
 
+      // VLST arguments are coerced to VLATs at the function boundary for
+      // ABI consistency. If this is a VLST that was coerced to
+      // a VLAT at the function boundary and the types match up, use
+      // llvm.experimental.vector.extract to convert back to the original
+      // VLST.
+      if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
+        auto *Coerced = Fn->getArg(FirstIRArg);
+        if (auto *VecTyFrom =
+                dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
+          if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
+            llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
+
+            assert(NumIRArgs == 1);
+            Coerced->setName(Arg->getName() + ".coerce");
+            ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector(
+                VecTyTo, Coerced, Zero, "castFixedSve")));
+            break;
+          }
+        }
+      }
+
       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
                                      Arg->getName());
diff --git a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
--- a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
+++ b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
@@ -59,14 +59,14 @@
 // CHECK-LABEL: define{{.*}} void @f2(
 // CHECK-SAME: <[[#div(VBITS,8)]] x i8>* noalias nocapture sret(<[[#div(VBITS,8)]] x i8>) align 16 %agg.result, <[[#div(VBITS,8)]] x i8>* nocapture readonly %0)
-// CHECK-NEXT: entry:
-// CHECK-NEXT: [[X:%.*]] = load <[[#div(VBITS,8)]] x i8>, <[[#div(VBITS,8)]] x i8>* [[TMP0:%.*]], align 16, [[TBAA6:!tbaa !.*]]
-// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v[[#div(VBITS,8)]]i8(<vscale x 16 x i8> undef, <[[#div(VBITS,8)]] x i8> [[X]], i64 0)
-// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> [[TMP1]], <vscale x 16 x i8> [[CASTSCALABLESVE]], i32 1)
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.experimental.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8(<vscale x 16 x i8> [[TMP2]], i64 0)
-// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[CASTFIXEDSVE]], <[[#div(VBITS,8)]] x i8>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]]
-// CHECK-NEXT: ret void
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[X:%.*]] = load <[[#div(VBITS,8)]] x i8>, <[[#div(VBITS,8)]] x i8>* [[TMP0:%.*]], align 16, [[TBAA6:!tbaa !.*]]
+// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v[[#div(VBITS,8)]]i8(<vscale x 16 x i8> undef, <[[#div(VBITS,8)]] x i8> [[X]], i64 0)
+// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.asrd.nxv16i8(<vscale x 16 x i1> [[TMP1]], <vscale x 16 x i8> [[CASTSCALABLESVE]], i32 1)
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.experimental.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8(<vscale x 16 x i8> [[TMP2]], i64 0)
+// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[CASTFIXEDSVE]], <[[#div(VBITS,8)]] x i8>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]]
+// CHECK-NEXT: ret void
 vec_int8 f2(vec_int8 x) { return svasrd_x(svptrue_b8(), x, 1); }
 #endif
@@ -78,24 +78,18 @@
 typedef svint8_t vec2 __attribute__((arm_sve_vector_bits(N)));
 
 // CHECK128-LABEL: define{{.*}} void @g(<vscale x 16 x i8> %x.coerce)
-// CHECK128-NEXT: entry:
-// CHECK128-NEXT: [[X:%.*]] = alloca <16 x i8>, align 16
-// CHECK128-NEXT: [[TMP0:%.*]] = bitcast <16 x i8>* [[X]] to <vscale x 16 x i8>*
-// CHECK128-NEXT: store <vscale x 16 x i8> [[X_COERCE:%.*]], <vscale x 16 x i8>* [[TMP0]], align 16
-// CHECK128-NEXT: [[X1:%.*]] = load <16 x i8>, <16 x i8>* [[X]], align 16, [[TBAA6:!tbaa !.*]]
-// CHECK128-NEXT: call void @f3(<16 x i8> [[X1]]) [[ATTR5:#.*]]
+// CHECK128-NEXT: entry:
+// CHECK128-NEXT: [[X:%.*]] = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> [[X_COERCE:%.*]], i64 0)
+// CHECK128-NEXT: call void @f3(<16 x i8> [[X]]) [[ATTR5:#.*]]
 // CHECK128-NEXT: ret void
 
 // CHECK-LABEL: define{{.*}} void @g(<vscale x 16 x i8> %x.coerce)
-// CHECK-NEXT: entry:
-// CHECK-NEXT: [[X:%.*]] = alloca <[[#div(VBITS,8)]] x i8>, align 16
-// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS,8)]] x i8>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <[[#div(VBITS,8)]] x i8>* [[X]] to <vscale x 16 x i8>*
-// CHECK-NEXT: store <vscale x 16 x i8> [[X_COERCE:%.*]], <vscale x 16 x i8>* [[TMP0]], align 16
-// CHECK-NEXT: [[X1:%.*]] = load <[[#div(VBITS,8)]] x i8>, <[[#div(VBITS,8)]] x i8>* [[X]], align 16, [[TBAA6]]
-// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[X1]], <[[#div(VBITS,8)]] x i8>* [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6]]
-// CHECK-NEXT: call void @f3(<[[#div(VBITS,8)]] x i8>* nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]]
-// CHECK-NEXT: ret void
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS,8)]] x i8>, align 16
+// CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS,8)]] x i8> @llvm.experimental.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8(<vscale x 16 x i8> [[X_COERCE:%.*]], i64 0)
+// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[X]], <[[#div(VBITS,8)]] x i8>* [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6]]
+// CHECK-NEXT: call void @f3(<[[#div(VBITS,8)]] x i8>* nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]]
+// CHECK-NEXT: ret void
 
 // CHECK128-LABEL: declare void @f3(<16 x i8>)
diff --git a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
--- a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
+++ b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
@@ -48,20 +48,14 @@
 // CHECK-SAME: [[#VBITS]]
 // CHECK-SAME: EES_(<vscale x 4 x i32> %x.coerce, <vscale x 4 x i32> %y.coerce)
 // CHECK-NEXT: entry:
-// CHECK-NEXT: %x = alloca <[[#div(VBITS,32)]] x i32>, align 16
-// CHECK-NEXT: %y = alloca <[[#div(VBITS,32)]] x i32>, align 16
-// CHECK-NEXT: %retval.coerce = alloca <vscale x 4 x i32>, align 16
-// CHECK-NEXT: %0 = bitcast <[[#div(VBITS,32)]] x i32>* %x to <vscale x 4 x i32>*
-// CHECK-NEXT: store <vscale x 4 x i32> %x.coerce, <vscale x 4 x i32>* %0, align 16
-// CHECK-NEXT: %x1 = load <[[#div(VBITS,32)]] x i32>, <[[#div(VBITS,32)]] x i32>* %x, align 16
-// CHECK-NEXT: %1 = bitcast <[[#div(VBITS,32)]] x i32>* %y to <vscale x 4 x i32>*
-// CHECK-NEXT: store <vscale x 4 x i32> %y.coerce, <vscale x 4 x i32>* %1, align 16
-// CHECK-NEXT: %y2 = load <[[#div(VBITS,32)]] x i32>, <[[#div(VBITS,32)]] x i32>* %y, align 16
-// CHECK-NEXT: %add = add <[[#div(VBITS,32)]] x i32> %y2, %x1
-// CHECK-NEXT: %retval.0..sroa_cast = bitcast <vscale x 4 x i32>* %retval.coerce to <[[#div(VBITS,32)]] x i32>*
-// CHECK-NEXT: store <[[#div(VBITS,32)]] x i32> %add, <[[#div(VBITS,32)]] x i32>* %retval.0..sroa_cast, align 16
-// CHECK-NEXT: %2 = load <vscale x 4 x i32>, <vscale x 4 x i32>* %retval.coerce, align 16
-// CHECK-NEXT: ret <vscale x 4 x i32> %2
+// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT: [[X:%.*]] = call <[[#div(VBITS, 32)]] x i32> @llvm.experimental.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[Y:%.*]] = call <[[#div(VBITS, 32)]] x i32> @llvm.experimental.vector.extract.v[[#div(VBITS, 32)]]i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE1:%.*]], i64 0)
+// CHECK-NEXT: [[ADD:%.*]] = add <[[#div(VBITS, 32)]] x i32> [[Y]], [[X]]
+// CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <[[#div(VBITS, 32)]] x i32>*
+// CHECK-NEXT: store <[[#div(VBITS, 32)]] x i32> [[ADD]], <[[#div(VBITS, 32)]] x i32>* [[RETVAL_0__SROA_CAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 typedef svint32_t vec __attribute__((arm_sve_vector_bits(N)));
 auto f(vec x, vec y) { return x + y; } // Returns a vec.
 #endif
@@ -76,19 +70,13 @@
 // CHECK-SAME: [[#VBITS]]
 // CHECK-SAME: EE(<vscale x 8 x i16> %x.coerce)
 // CHECK-NEXT: entry:
-// CHECK128-NEXT: %x = alloca <[[#div(VBITS,16)]] x i16>, align 16
-// CHECK128-NEXT: %0 = bitcast <[[#div(VBITS,16)]] x i16>* %x to <vscale x 8 x i16>*
-// CHECK128-NEXT: store <vscale x 8 x i16> %x.coerce, <vscale x 8 x i16>* %0, align 16
-// CHECK128-NEXT: %x1 = load <[[#div(VBITS,16)]] x i16>, <[[#div(VBITS,16)]] x i16>* %x, align 16
-// CHECK128-NEXT: call void @_Z1fDv[[#div(VBITS,16)]]_s(<[[#div(VBITS,16)]] x i16> %x1)
+// CHECK128-NEXT: [[X:%.*]] = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> [[X_COERCE:%.*]], i64 0)
+// CHECK128-NEXT: call void @_Z1fDv8_s(<8 x i16> [[X]]) [[ATTR5:#.*]]
 // CHECK128-NEXT: ret void
-// CHECKWIDE-NEXT: %x = alloca <[[#div(VBITS,16)]] x i16>, align 16
-// CHECKWIDE-NEXT: %indirect-arg-temp = alloca <[[#div(VBITS,16)]] x i16>, align 16
-// CHECKWIDE-NEXT: %0 = bitcast <[[#div(VBITS,16)]] x i16>* %x to <vscale x 8 x i16>*
-// CHECKWIDE-NEXT: store <vscale x 8 x i16> %x.coerce, <vscale x 8 x i16>* %0, align 16
-// CHECKWIDE-NEXT: %x1 = load <[[#div(VBITS,16)]] x i16>, <[[#div(VBITS,16)]] x i16>* %x, align 16
-// CHECKWIDE-NEXT: store <[[#div(VBITS,16)]] x i16> %x1, <[[#div(VBITS,16)]] x i16>* %indirect-arg-temp, align 16
-// CHECKWIDE-NEXT: call void @_Z1fDv[[#div(VBITS,16)]]_s(<[[#div(VBITS,16)]] x i16>* nonnull %indirect-arg-temp)
+// CHECKWIDE-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS, 16)]] x i16>, align 16
+// CHECKWIDE-NEXT: [[X:%.*]] = call <[[#div(VBITS, 16)]] x i16> @llvm.experimental.vector.extract.v[[#div(VBITS, 16)]]i16.nxv8i16(<vscale x 8 x i16> [[X_COERCE:%.*]], i64 0)
+// CHECKWIDE-NEXT: store <[[#div(VBITS, 16)]] x i16> [[X]], <[[#div(VBITS, 16)]] x i16>* [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6:!tbaa !.*]]
+// CHECKWIDE-NEXT: call void @_Z1fDv[[#div(VBITS, 16)]]_s(<[[#div(VBITS, 16)]] x i16>* nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]]
 // CHECKWIDE-NEXT: ret void
 void g(vec2 x) { f(x); } // OK
 #endif
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
@@ -24,17 +24,14 @@
 // CHECK-LABEL: @fixed_caller(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[X:%.*]] = alloca <16 x i32>, align 16
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[X]] to <vscale x 4 x i32>*
-// CHECK-NEXT: store <vscale x 4 x i32> [[X_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, <16 x i32>* [[X]], align 16, [[TBAA6:!tbaa !.*]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[X1]], i64 0)
+// CHECK-NEXT: [[X:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[X]], i64 0)
 // CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[CASTSCALABLESVE]], i64 0)
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
 // CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
-// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+// CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
 fixed_int32_t fixed_caller(fixed_int32_t x) {
   return sizeless_callee(x);
@@ -42,15 +39,12 @@
 // CHECK-LABEL: @fixed_callee(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[X:%.*]] = alloca <16 x i32>, align 16
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[X]] to <vscale x 4 x i32>*
-// CHECK-NEXT: store <vscale x 4 x i32> [[X_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, <16 x i32>* [[X]], align 16, [[TBAA6]]
+// CHECK-NEXT: [[X:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
-// CHECK-NEXT: store <16 x i32> [[X1]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
-// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
+// CHECK-NEXT: store <16 x i32> [[X]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
 //
 fixed_int32_t fixed_callee(fixed_int32_t x) {
   return x;
@@ -67,7 +61,7 @@
 // CHECK-NEXT: [[CALL:%.*]] = call <vscale x 4 x i32> @fixed_callee(<vscale x 4 x i32> [[TMP0]])
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i32>* [[COERCE1]] to <vscale x 4 x i32>*
 // CHECK-NEXT: store <vscale x 4 x i32> [[CALL]], <vscale x 4 x i32>* [[TMP1]], align 16
-// CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* [[COERCE1]], align 16, [[TBAA6]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* [[COERCE1]], align 16, [[TBAA6:!tbaa !.*]]
 // CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP2]], i64 0)
 // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
 //
@@ -81,24 +75,18 @@
 // CHECK-LABEL: @call_int32_ff(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[OP1:%.*]] = alloca <16 x i32>, align 16
-// CHECK-NEXT: [[OP2:%.*]] = alloca <16 x i32>, align 16
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[OP1]] to <vscale x 4 x i32>*
-// CHECK-NEXT: store <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT: [[OP11:%.*]] = load <16 x i32>, <16 x i32>* [[OP1]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i32>* [[OP2]] to <vscale x 4 x i32>*
-// CHECK-NEXT: store <vscale x 4 x i32> [[OP2_COERCE:%.*]], <vscale x 4 x i32>* [[TMP1]], align 16
-// CHECK-NEXT: [[OP22:%.*]] = load <16 x i32>, <16 x i32>* [[OP2]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP11]], i64 0)
-// CHECK-NEXT: [[CASTSCALABLESVE3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP22]], i64 0)
-// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP2]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[CASTSCALABLESVE3]])
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP3]], i64 0)
+// CHECK-NEXT: [[OP1:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[OP2:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE1:%.*]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP1]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP2]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[CASTSCALABLESVE3]])
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP1]], i64 0)
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
 // CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
-// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP4]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
 //
 fixed_int32_t call_int32_ff(svbool_t pg, fixed_int32_t op1, fixed_int32_t op2) {
   return svsel(pg, op1, op2);
@@ -106,24 +94,18 @@
 // CHECK-LABEL: @call_float64_ff(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[OP1:%.*]] = alloca <8 x double>, align 16
-// CHECK-NEXT: [[OP2:%.*]] = alloca <8 x double>, align 16
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x double>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x double>* [[OP1]] to <vscale x 2 x double>*
-// CHECK-NEXT: store <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double>* [[TMP0]], align 16
-// CHECK-NEXT: [[OP11:%.*]] = load <8 x double>, <8 x double>* [[OP1]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x double>* [[OP2]] to <vscale x 2 x double>*
-// CHECK-NEXT: store <vscale x 2 x double> [[OP2_COERCE:%.*]], <vscale x 2 x double>* [[TMP1]], align 16
-// CHECK-NEXT: [[OP22:%.*]] = load <8 x double>, <8 x double>* [[OP2]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP11]], i64 0)
-// CHECK-NEXT: [[CASTSCALABLESVE3:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP22]], i64 0)
-// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP2]], <vscale x 2 x double> [[CASTSCALABLESVE]], <vscale x 2 x double> [[CASTSCALABLESVE3]])
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP3]], i64 0)
+// CHECK-NEXT: [[OP1:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[X_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[OP2:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[X_COERCE1:%.*]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP1]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE3:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP2]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[CASTSCALABLESVE]], <vscale x 2 x double> [[CASTSCALABLESVE3]])
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP1]], i64 0)
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 2 x double>* [[RETVAL_COERCE]] to <8 x double>*
 // CHECK-NEXT: store <8 x double> [[CASTFIXEDSVE]], <8 x double>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
-// CHECK-NEXT: ret <vscale x 2 x double> [[TMP4]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: ret <vscale x 2 x double> [[TMP2]]
 //
 fixed_float64_t call_float64_ff(svbool_t pg, fixed_float64_t op1, fixed_float64_t op2) {
   return svsel(pg, op1, op2);
@@ -168,19 +150,16 @@
 // CHECK-LABEL: @call_int32_fs(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[OP1:%.*]] = alloca <16 x i32>, align 16
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[OP1]] to <vscale x 4 x i32>*
-// CHECK-NEXT: store <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT: [[OP11:%.*]] = load <16 x i32>, <16 x i32>* [[OP1]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP11]], i64 0)
-// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP1]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[OP2:%.*]])
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP2]], i64 0)
+// CHECK-NEXT: [[OP1:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP1]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[OP2:%.*]])
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP1]], i64 0)
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
 // CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
-// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
 //
 fixed_int32_t call_int32_fs(svbool_t pg, fixed_int32_t op1, svint32_t op2) {
   return svsel(pg, op1, op2);
@@ -188,19 +167,16 @@
 // CHECK-LABEL: @call_float64_fs(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[OP1:%.*]] = alloca <8 x double>, align 16
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x double>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x double>* [[OP1]] to <vscale x 2 x double>*
-// CHECK-NEXT: store <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double>* [[TMP0]], align 16
-// CHECK-NEXT: [[OP11:%.*]] = load <8 x double>, <8 x double>* [[OP1]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP11]], i64 0)
-// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP1]], <vscale x 2 x double> [[CASTSCALABLESVE]], <vscale x 2 x double> [[OP2:%.*]])
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP2]], i64 0)
+// CHECK-NEXT: [[OP1:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[X_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP1]], i64 0)
+// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
+// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[CASTSCALABLESVE]], <vscale x 2 x double> [[OP2:%.*]])
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP1]], i64 0)
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 2 x double>* [[RETVAL_COERCE]] to <8 x double>*
 // CHECK-NEXT: store <8 x double> [[CASTFIXEDSVE]], <8 x double>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
-// CHECK-NEXT: ret <vscale x 2 x double> [[TMP3]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: ret <vscale x 2 x double> [[TMP2]]
 //
 fixed_float64_t call_float64_fs(svbool_t pg, fixed_float64_t op1, svfloat64_t op2) {
   return svsel(pg, op1, op2);
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
@@ -13,11 +13,8 @@
 // CHECK-LABEL: @to_svint32_t(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TYPE:%.*]] = alloca <16 x i32>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[TYPE]] to <vscale x 4 x i32>*
-// CHECK-NEXT: store <vscale x 4 x i32> [[TYPE_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT: [[TYPE1:%.*]] = load <16 x i32>, <16 x i32>* [[TYPE]], align 16, [[TBAA6:!tbaa !.*]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TYPE1]], i64 0)
+// CHECK-NEXT: [[TYPE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TYPE]], i64 0)
 // CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
 //
 svint32_t to_svint32_t(fixed_int32_t type) {
@@ -39,11 +36,8 @@
 // CHECK-LABEL: @to_svfloat64_t(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TYPE:%.*]] = alloca <8 x double>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x double>* [[TYPE]] to <vscale x 2 x double>*
-// CHECK-NEXT: store <vscale x 2 x double> [[TYPE_COERCE:%.*]], <vscale x 2 x double>* [[TMP0]], align 16
-// CHECK-NEXT: [[TYPE1:%.*]] = load <8 x double>, <8 x double>* [[TYPE]], align 16, [[TBAA6]]
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[TYPE1]], i64 0)
+// CHECK-NEXT: [[TYPE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[X_COERCE:%.*]], i64 0)
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[TYPE]], i64 0)
 // CHECK-NEXT: ret <vscale x 2 x double> [[CASTSCALABLESVE]]
 //
 svfloat64_t to_svfloat64_t(fixed_float64_t type) {
@@ -69,7 +63,7 @@
 // CHECK-NEXT: [[TYPE_ADDR:%.*]] = alloca <8 x i8>, align 16
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8>* [[TYPE]] to <vscale x 16 x i1>*
 // CHECK-NEXT: store <vscale x 16 x i1> [[TYPE_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
-// CHECK-NEXT: [[TYPE1:%.*]] = load <8 x i8>, <8 x i8>* [[TYPE]], align 16, [[TBAA6]]
+// CHECK-NEXT: [[TYPE1:%.*]] = load <8 x i8>, <8 x i8>* [[TYPE]], align 16, [[TBAA6:!tbaa !.*]]
 // CHECK-NEXT: store <8 x i8> [[TYPE1]], <8 x i8>* [[TYPE_ADDR]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8>* [[TYPE_ADDR]] to <vscale x 16 x i1>*
 // CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, [[TBAA6]]
@@ -130,11 +124,8 @@
 // CHECK-LABEL: @from_fixed_int32_t__to_gnu_int32_t(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TYPE:%.*]] = alloca <16 x i32>, align 16
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[TYPE]] to <vscale x 4 x i32>*
-// CHECK-NEXT: store <vscale x 4 x i32> [[TYPE_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT: [[TYPE1:%.*]] = load <16 x i32>, <16 x i32>* [[TYPE]], align 16, [[TBAA6]]
-// CHECK-NEXT: store <16 x i32> [[TYPE1]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]]
+// CHECK-NEXT: [[TYPE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
+// CHECK-NEXT: store <16 x i32> [[TYPE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]]
 // CHECK-NEXT: ret void
 //
 gnu_int32_t from_fixed_int32_t__to_gnu_int32_t(fixed_int32_t type) {
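
For context, a minimal C sketch of the kind of source this codegen path applies to, mirroring the existing tests above; it is not part of the patch, and the function name add_one plus the exact compile flags are illustrative assumptions.

// Assumed invocation (not from the patch), e.g.:
//   clang -O2 -target aarch64-linux-gnu -march=armv8-a+sve \
//     -msve-vector-bits=512 -S example.c
#include <arm_sve.h>

#define N __ARM_FEATURE_SVE_BITS

// Fixed-length (VLST) view of the scalable svint32_t type, as in the tests.
typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));

// At the function boundary the argument and return value travel as the
// scalable (VLAT) type for ABI consistency; with this patch the incoming
// argument is recovered via llvm.experimental.vector.extract rather than a
// store/load through a stack temporary.
fixed_int32_t add_one(fixed_int32_t x) {
  // The fixed-length value converts implicitly to the scalable type used by
  // the ACLE intrinsic, and the result converts back on return.
  return svadd_x(svptrue_b32(), x, 1);
}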