diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -2095,24 +2095,18 @@
          isa<llvm::ScalableVectorType>(DstTy)) ||
         (isa<llvm::ScalableVectorType>(SrcTy) &&
          isa<llvm::FixedVectorType>(DstTy))) {
-      if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
-        // Call expressions can't have a scalar return unless the return type
-        // is a reference type so an lvalue can't be emitted. Create a temp
-        // alloca to store the call, bitcast the address then load.
-        QualType RetTy = CE->getCallReturnType(CGF.getContext());
-        Address Addr =
-            CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-call-rvalue");
-        LValue LV = CGF.MakeAddrLValue(Addr, RetTy);
-        CGF.EmitStoreOfScalar(Src, LV);
-        Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy),
-                                            "castFixedSve");
-        LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
-        DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
-        return EmitLoadOfLValue(DestLV, CE->getExprLoc());
-      }
-
-      Address Addr = EmitLValue(E).getAddress(CGF);
-      Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
+      QualType Ty = E->getType();
+      // Call expressions can't have a scalar return unless the return type
+      // is a reference type so an lvalue can't be emitted. Create a temp
+      // alloca to store the call, bitcast the address then load.
+      if (const CallExpr *CE = dyn_cast<CallExpr>(E))
+        Ty = CE->getCallReturnType(CGF.getContext());
+
+      Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
+      LValue LV = CGF.MakeAddrLValue(Addr, Ty);
+      CGF.EmitStoreOfScalar(Src, LV);
+      Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy),
+                                          "castFixedSve");
       LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
       DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
       return EmitLoadOfLValue(DestLV, CE->getExprLoc());
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
@@ -30,21 +30,21 @@
 // CHECK-128-LABEL: @read_int64(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i64>, <2 x i64>* [[ARRAYIDX]], align 16, [[TBAA6:!tbaa !.*]]
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i64>, <2 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6:![0-9]+]]
 // CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP0]], i64 0)
 // CHECK-128-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
 //
 // CHECK-256-LABEL: @read_int64(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* [[ARRAYIDX]], align 16, [[TBAA6:!tbaa !.*]]
+// CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6:![0-9]+]]
 // CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v4i64(<vscale x 2 x i64> undef, <4 x i64> [[TMP0]], i64 0)
 // CHECK-256-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
 //
 // CHECK-512-LABEL: @read_int64(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* [[ARRAYIDX]], align 16, [[TBAA6:!tbaa !.*]]
+// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6:![0-9]+]]
 // CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[TMP0]], i64 0)
 // CHECK-512-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
 //
@@ -56,21 +56,21 @@
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> [[X:%.*]], i64 0)
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-128-NEXT: store <2 x i64> [[CASTFIXEDSVE]], <2 x i64>* [[ARRAYIDX]], align 16, [[TBAA6]]
+// CHECK-128-NEXT: store <2 x i64> [[CASTFIXEDSVE]], <2 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-256-LABEL: @write_int64(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> [[X:%.*]], i64 0)
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-256-NEXT: store <4 x i64> [[CASTFIXEDSVE]], <4 x i64>* [[ARRAYIDX]], align 16, [[TBAA6]]
+// CHECK-256-NEXT: store <4 x i64> [[CASTFIXEDSVE]], <4 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-256-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_int64(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[X:%.*]], i64 0)
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-512-NEXT: store <8 x i64> [[CASTFIXEDSVE]], <8 x i64>* [[ARRAYIDX]], align 16, [[TBAA6]]
+// CHECK-512-NEXT: store <8 x i64> [[CASTFIXEDSVE]], <8 x i64>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: ret void
 //
 void write_int64(struct struct_int64 *s, svint64_t x) {
@@ -84,21 +84,21 @@
 // CHECK-128-LABEL: @read_float64(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x double>, <2 x double>* [[ARRAYIDX]], align 16, [[TBAA6]]
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x double>, <2 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v2f64(<vscale x 2 x double> undef, <2 x double> [[TMP0]], i64 0)
 // CHECK-128-NEXT: ret <vscale x 2 x double> [[CASTSCALABLESVE]]
 //
 // CHECK-256-LABEL: @read_float64(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x double>, <4 x double>* [[ARRAYIDX]], align 16, [[TBAA6]]
+// CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x double>, <4 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v4f64(<vscale x 2 x double> undef, <4 x double> [[TMP0]], i64 0)
 // CHECK-256-NEXT: ret <vscale x 2 x double> [[CASTSCALABLESVE]]
 //
 // CHECK-512-LABEL: @read_float64(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x double>, <8 x double>* [[ARRAYIDX]], align 16, [[TBAA6]]
+// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x double>, <8 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[TMP0]], i64 0)
 // CHECK-512-NEXT: ret <vscale x 2 x double> [[CASTSCALABLESVE]]
 //
@@ -110,21 +110,21 @@
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x double> @llvm.experimental.vector.extract.v2f64.nxv2f64(<vscale x 2 x double> [[X:%.*]], i64 0)
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-128-NEXT: store <2 x double> [[CASTFIXEDSVE]], <2 x double>* [[ARRAYIDX]], align 16, [[TBAA6]]
+// CHECK-128-NEXT: store <2 x double> [[CASTFIXEDSVE]], <2 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-256-LABEL: @write_float64(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <4 x double> @llvm.experimental.vector.extract.v4f64.nxv2f64(<vscale x 2 x double> [[X:%.*]], i64 0)
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-256-NEXT: store <4 x double> [[CASTFIXEDSVE]], <4 x double>* [[ARRAYIDX]], align 16, [[TBAA6]]
+// CHECK-256-NEXT: store <4 x double> [[CASTFIXEDSVE]], <4 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-256-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_float64(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[X:%.*]], i64 0)
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-512-NEXT: store <8 x double> [[CASTFIXEDSVE]], <8 x double>* [[ARRAYIDX]], align 16, [[TBAA6]]
+// CHECK-512-NEXT: store <8 x double> [[CASTFIXEDSVE]], <8 x double>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: ret void
 //
 void write_float64(struct struct_float64 *s, svfloat64_t x) {
@@ -138,21 +138,21 @@
 // CHECK-128-LABEL: @read_bfloat16(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-128-NEXT: [[TMP0:%.*]] = load <8 x bfloat>, <8 x bfloat>* [[ARRAYIDX]], align 16, [[TBAA6]]
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <8 x bfloat>, <8 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> [[TMP0]], i64 0)
 // CHECK-128-NEXT: ret <vscale x 8 x bfloat> [[CASTSCALABLESVE]]
 //
 // CHECK-256-LABEL: @read_bfloat16(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-256-NEXT: [[TMP0:%.*]] = load <16 x bfloat>, <16 x bfloat>* [[ARRAYIDX]], align 16, [[TBAA6]]
+// CHECK-256-NEXT: [[TMP0:%.*]] = load <16 x bfloat>, <16 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-256-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v16bf16(<vscale x 8 x bfloat> undef, <16 x bfloat> [[TMP0]], i64 0)
 // CHECK-256-NEXT: ret <vscale x 8 x bfloat> [[CASTSCALABLESVE]]
 //
 // CHECK-512-LABEL: @read_bfloat16(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-512-NEXT: [[TMP0:%.*]] = load <32 x bfloat>, <32 x bfloat>* [[ARRAYIDX]], align 16, [[TBAA6]]
+// CHECK-512-NEXT: [[TMP0:%.*]] = load <32 x bfloat>, <32 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v32bf16(<vscale x 8 x bfloat> undef, <32 x bfloat> [[TMP0]], i64 0)
 // CHECK-512-NEXT: ret <vscale x 8 x bfloat> [[CASTSCALABLESVE]]
 //
@@ -164,21 +164,21 @@
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16(<vscale x 8 x bfloat> [[X:%.*]], i64 0)
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-128-NEXT: store <8 x bfloat> [[CASTFIXEDSVE]], <8 x bfloat>* [[ARRAYIDX]], align 16, [[TBAA6]]
+// CHECK-128-NEXT: store <8 x bfloat> [[CASTFIXEDSVE]], <8 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-256-LABEL: @write_bfloat16(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x bfloat> @llvm.experimental.vector.extract.v16bf16.nxv8bf16(<vscale x 8 x bfloat> [[X:%.*]], i64 0)
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-256-NEXT: store <16 x bfloat> [[CASTFIXEDSVE]], <16 x bfloat>* [[ARRAYIDX]], align 16, [[TBAA6]]
+// CHECK-256-NEXT: store <16 x bfloat> [[CASTFIXEDSVE]], <16 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-256-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_bfloat16(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <32 x bfloat> @llvm.experimental.vector.extract.v32bf16.nxv8bf16(<vscale x 8 x bfloat> [[X:%.*]], i64 0)
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-512-NEXT: store <32 x bfloat> [[CASTFIXEDSVE]], <32 x bfloat>* [[ARRAYIDX]], align 16, [[TBAA6]]
+// CHECK-512-NEXT: store <32 x bfloat> [[CASTFIXEDSVE]], <32 x bfloat>* [[ARRAYIDX]], align 16, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: ret void
 //
 void write_bfloat16(struct struct_bfloat16 *s, svbfloat16_t x) {
@@ -191,23 +191,32 @@
 // CHECK-128-LABEL: @read_bool(
 // CHECK-128-NEXT: entry:
+// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <2 x i8>, align 16
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <2 x i8>* [[ARRAYIDX]] to <vscale x 16 x i1>*
-// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP0]], align 2, [[TBAA6]]
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: store <2 x i8> [[TMP0]], <2 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <2 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: ret <vscale x 16 x i1> [[TMP1]]
 //
 // CHECK-256-LABEL: @read_bool(
 // CHECK-256-NEXT: entry:
+// CHECK-256-NEXT: [[SAVED_VALUE:%.*]] = alloca <4 x i8>, align 16
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-256-NEXT: [[TMP0:%.*]] = bitcast <4 x i8>* [[ARRAYIDX]] to <vscale x 16 x i1>*
-// CHECK-256-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP0]], align 2, [[TBAA6]]
+// CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i8>, <4 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
+// CHECK-256-NEXT: store <4 x i8> [[TMP0]], <4 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <4 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
+// CHECK-256-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
 // CHECK-256-NEXT: ret <vscale x 16 x i1> [[TMP1]]
 //
 // CHECK-512-LABEL: @read_bool(
 // CHECK-512-NEXT: entry:
+// CHECK-512-NEXT: [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 16
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-512-NEXT: [[TMP0:%.*]] = bitcast <8 x i8>* [[ARRAYIDX]] to <vscale x 16 x i1>*
-// CHECK-512-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP0]], align 2, [[TBAA6]]
+// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
+// CHECK-512-NEXT: store <8 x i8> [[TMP0]], <8 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
+// CHECK-512-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: ret <vscale x 16 x i1> [[TMP1]]
 //
 svbool_t read_bool(struct struct_bool *s) {
@@ -216,32 +225,32 @@
 // CHECK-128-LABEL: @write_bool(
 // CHECK-128-NEXT: entry:
-// CHECK-128-NEXT: [[X_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-128-NEXT: store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[X_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
-// CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[X_ADDR]] to <2 x i8>*
-// CHECK-128-NEXT: [[TMP1:%.*]] = load <2 x i8>, <2 x i8>* [[TMP0]], align 16, [[TBAA6]]
+// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
+// CHECK-128-NEXT: store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <2 x i8>*
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-128-NEXT: store <2 x i8> [[TMP1]], <2 x i8>* [[ARRAYIDX]], align 2, [[TBAA6]]
+// CHECK-128-NEXT: store <2 x i8> [[TMP0]], <2 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-256-LABEL: @write_bool(
 // CHECK-256-NEXT: entry:
-// CHECK-256-NEXT: [[X_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-256-NEXT: store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[X_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
-// CHECK-256-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[X_ADDR]] to <4 x i8>*
-// CHECK-256-NEXT: [[TMP1:%.*]] = load <4 x i8>, <4 x i8>* [[TMP0]], align 16, [[TBAA6]]
+// CHECK-256-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
+// CHECK-256-NEXT: store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <4 x i8>*
+// CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i8>, <4 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-256-NEXT: store <4 x i8> [[TMP1]], <4 x i8>* [[ARRAYIDX]], align 2, [[TBAA6]]
+// CHECK-256-NEXT: store <4 x i8> [[TMP0]], <4 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
 // CHECK-256-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_bool(
 // CHECK-512-NEXT: entry:
-// CHECK-512-NEXT: [[X_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-512-NEXT: store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[X_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
-// CHECK-512-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[X_ADDR]] to <8 x i8>*
-// CHECK-512-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 16, [[TBAA6]]
+// CHECK-512-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
+// CHECK-512-NEXT: store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <8 x i8>*
+// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-512-NEXT: store <8 x i8> [[TMP1]], <8 x i8>* [[ARRAYIDX]], align 2, [[TBAA6]]
+// CHECK-512-NEXT: store <8 x i8> [[TMP0]], <8 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: ret void
 //
 void write_bool(struct struct_bool *s, svbool_t x) {
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
@@ -79,9 +79,9 @@
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[OP1:%.*]] = alloca <8 x i8>, align 16
 // CHECK-NEXT: [[OP2:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT: [[OP1_ADDR:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT: [[OP2_ADDR:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT: [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
+// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 16
+// CHECK-NEXT: [[SAVED_VALUE3:%.*]] = alloca <8 x i8>, align 16
+// CHECK-NEXT: [[SAVED_VALUE5:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
 // CHECK-NEXT: store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
@@ -89,20 +89,20 @@
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8>* [[OP2]] to <vscale x 16 x i1>*
 // CHECK-NEXT: store <vscale x 16 x i1> [[OP2_COERCE:%.*]], <vscale x 16 x i1>* [[TMP1]], align 16
 // CHECK-NEXT: [[OP22:%.*]] = load <8 x i8>, <8 x i8>* [[OP2]], align 16, !tbaa [[TBAA6]]
-// CHECK-NEXT: store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, !tbaa [[TBAA6]]
-// CHECK-NEXT: store <8 x i8> [[OP22]], <8 x i8>* [[OP2_ADDR]], align 16, !tbaa [[TBAA6]]
-// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP2]], align 16, !tbaa [[TBAA6]]
-// CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i8>* [[OP2_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP4]], align 16, !tbaa [[TBAA6]]
-// CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]])
-// CHECK-NEXT: store <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, !tbaa [[TBAA9:![0-9]+]]
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
-// CHECK-NEXT: [[TMP7:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT: store <8 x i8> [[OP11]], <8 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
+// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT: store <8 x i8> [[OP22]], <8 x i8>* [[SAVED_VALUE3]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT: [[CASTFIXEDSVE4:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE3]] to <vscale x 16 x i1>*
+// CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE4]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP3]])
+// CHECK-NEXT: store <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1>* [[SAVED_VALUE5]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-NEXT: [[CASTFIXEDSVE6:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE5]] to <8 x i8>*
+// CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE6]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
-// CHECK-NEXT: store <8 x i8> [[TMP7]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP8:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
-// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP8]]
+// CHECK-NEXT: store <8 x i8> [[TMP5]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
+// CHECK-NEXT: [[TMP6:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP6]]
 //
 fixed_bool_t call_bool_ff(svbool_t pg, fixed_bool_t op1, fixed_bool_t op2) {
   return svsel(pg, op1, op2);
@@ -135,23 +135,23 @@
 // CHECK-LABEL: @call_bool_fs(
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[OP1:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT: [[OP1_ADDR:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT: [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
+// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 16
+// CHECK-NEXT: [[SAVED_VALUE2:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
 // CHECK-NEXT: store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
 // CHECK-NEXT: [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, !tbaa [[TBAA6]]
-// CHECK-NEXT: store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, !tbaa [[TBAA6]]
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, !tbaa [[TBAA6]]
-// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[OP2:%.*]])
-// CHECK-NEXT: store <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, !tbaa [[TBAA9]]
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
-// CHECK-NEXT: [[TMP4:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT: store <8 x i8> [[OP11]], <8 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP1]], <vscale x 16 x i1> [[OP2:%.*]])
+// CHECK-NEXT: store <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1>* [[SAVED_VALUE2]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT: [[CASTFIXEDSVE3:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE2]] to <8 x i8>*
+// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE3]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
-// CHECK-NEXT: store <8 x i8> [[TMP4]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
-// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP5]]
+// CHECK-NEXT: store <8 x i8> [[TMP3]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
+// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP4]]
 //
 fixed_bool_t call_bool_fs(svbool_t pg, fixed_bool_t op1, svbool_t op2) {
   return svsel(pg, op1, op2);
@@ -183,11 +183,11 @@
 // CHECK-LABEL: @call_bool_ss(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
+// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]])
-// CHECK-NEXT: store <vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, !tbaa [[TBAA9]]
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
+// CHECK-NEXT: store <vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <8 x i8>*
 // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
 // CHECK-NEXT: store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
@@ -17,13 +17,19 @@
 // CHECK-NEXT: [[PRED_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
 // CHECK-NEXT: [[VEC_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
 // CHECK-NEXT: [[PG:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 8
+// CHECK-NEXT: [[SAVED_VALUE1:%.*]] = alloca <8 x i8>, align 8
 // CHECK-NEXT: store <vscale x 16 x i1> [[PRED:%.*]], <vscale x 16 x i1>* [[PRED_ADDR]], align 2
 // CHECK-NEXT: store <vscale x 4 x i32> [[VEC:%.*]], <vscale x 4 x i32>* [[VEC_ADDR]], align 16
 // CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[PRED_ADDR]], align 2
 // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2
-// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* bitcast (<8 x i8>* @global_pred to <vscale x 16 x i1>*), align 2
+// CHECK-NEXT: store <8 x i8> [[TMP1]], <8 x i8>* [[SAVED_VALUE]], align 8
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
+// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 8
 // CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2
-// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* bitcast (<8 x i8>* @global_pred to <vscale x 16 x i1>*), align 2
+// CHECK-NEXT: store <8 x i8> [[TMP3]], <8 x i8>* [[SAVED_VALUE1]], align 8
+// CHECK-NEXT: [[CASTFIXEDSVE2:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE1]] to <vscale x 16 x i1>*
+// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE2]], align 8
 // CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.and.z.nxv16i1(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP4]])
 // CHECK-NEXT: store <vscale x 16 x i1> [[TMP5]], <vscale x 16 x i1>* [[PG]], align 2
 // CHECK-NEXT: [[TMP6:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[PG]], align 2
@@ -32,11 +38,11 @@
 // CHECK-NEXT: [[TMP8:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[VEC_ADDR]], align 16
 // CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP6]])
 // CHECK-NEXT: [[TMP10:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[TMP8]])
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP10]], i64 0)
-// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[RETVAL]], align 16
+// CHECK-NEXT: [[CASTFIXEDSVE3:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP10]], i64 0)
+// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE3]], <16 x i32>* [[RETVAL]], align 16
 // CHECK-NEXT: [[TMP11:%.*]] = load <16 x i32>, <16 x i32>* [[RETVAL]], align 16
-// CHECK-NEXT: [[CASTSCALABLESVE1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP11]], i64 0)
-// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE1]]
+// CHECK-NEXT: [[CASTSCALABLESVE4:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP11]], i64 0)
+// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE4]]
 //
 fixed_int32_t foo(svbool_t pred, svint32_t vec) {
   svbool_t pg = svand_z(pred, global_pred, global_pred);
@@ -103,3 +109,49 @@
   parr = &arr[0];
   return *parr;
 }
+
+// CHECK-LABEL: @test_cast(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[RETVAL:%.*]] = alloca <16 x i32>, align 16
+// CHECK-NEXT: [[PRED_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CHECK-NEXT: [[VEC_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT: [[XX:%.*]] = alloca <16 x i32>, align 16
+// CHECK-NEXT: [[YY:%.*]] = alloca <16 x i32>, align 16
+// CHECK-NEXT: [[PG:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 8
+// CHECK-NEXT: [[SAVED_VALUE1:%.*]] = alloca <16 x i32>, align 64
+// CHECK-NEXT: store <vscale x 16 x i1> [[PRED:%.*]], <vscale x 16 x i1>* [[PRED_ADDR]], align 2
+// CHECK-NEXT: store <vscale x 4 x i32> [[VEC:%.*]], <vscale x 4 x i32>* [[VEC_ADDR]], align 16
+// CHECK-NEXT: store <16 x i32> <i32 1, i32 2, i32 3, i32 4, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, <16 x i32>* [[XX]], align 16
+// CHECK-NEXT: store <16 x i32> <i32 2, i32 5, i32 4, i32 6, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, <16 x i32>* [[YY]], align 16
+// CHECK-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[PRED_ADDR]], align 2
+// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2
+// CHECK-NEXT: store <8 x i8> [[TMP1]], <8 x i8>* [[SAVED_VALUE]], align 8
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
+// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 8
+// CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, <16 x i32>* [[XX]], align 16
+// CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, <16 x i32>* [[YY]], align 16
+// CHECK-NEXT: [[ADD:%.*]] = add <16 x i32> [[TMP3]], [[TMP4]]
+// CHECK-NEXT: store <16 x i32> [[ADD]], <16 x i32>* [[SAVED_VALUE1]], align 64
+// CHECK-NEXT: [[CASTFIXEDSVE2:%.*]] = bitcast <16 x i32>* [[SAVED_VALUE1]] to <vscale x 16 x i1>*
+// CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE2]], align 64
+// CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.and.z.nxv16i1(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP5]])
+// CHECK-NEXT: store <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1>* [[PG]], align 2
+// CHECK-NEXT: [[TMP7:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[PG]], align 2
+// CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, <16 x i32>* @global_vec, align 16
+// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP8]], i64 0)
+// CHECK-NEXT: [[TMP9:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[VEC_ADDR]], align 16
+// CHECK-NEXT: [[TMP10:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP7]])
+// CHECK-NEXT: [[TMP11:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> [[TMP10]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[TMP9]])
+// CHECK-NEXT: [[CASTFIXEDSVE3:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP11]], i64 0)
+// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE3]], <16 x i32>* [[RETVAL]], align 16
+// CHECK-NEXT: [[TMP12:%.*]] = load <16 x i32>, <16 x i32>* [[RETVAL]], align 16
+// CHECK-NEXT: [[CASTSCALABLESVE4:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP12]], i64 0)
+// CHECK-NEXT: ret <vscale x 4 x i32> [[CASTSCALABLESVE4]]
+//
+fixed_int32_t test_cast(svbool_t pred, svint32_t vec) {
+  fixed_int32_t xx = {1, 2, 3, 4};
+  fixed_int32_t yy = {2, 5, 4, 6};
+  svbool_t pg = svand_z(pred, global_pred, xx + yy);
+  return svadd_m(pg, global_vec, vec);
+}
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c
@@ -22,13 +22,13 @@
 // CHECK-128-LABEL: @write_global_i64(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> [[V:%.*]], i64 0)
-// CHECK-128-NEXT: store <2 x i64> [[CASTFIXEDSVE]], <2 x i64>* @global_i64, align 16, [[TBAA6:!tbaa !.*]]
+// CHECK-128-NEXT: store <2 x i64> [[CASTFIXEDSVE]], <2 x i64>* @global_i64, align 16, !tbaa [[TBAA6:![0-9]+]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_global_i64(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i64> @llvm.experimental.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[V:%.*]], i64 0)
-// CHECK-512-NEXT: store <8 x i64> [[CASTFIXEDSVE]], <8 x i64>* @global_i64, align 16, [[TBAA6:!tbaa !.*]]
+// CHECK-512-NEXT: store <8 x i64> [[CASTFIXEDSVE]], <8 x i64>* @global_i64, align 16, !tbaa [[TBAA6:![0-9]+]]
 // CHECK-512-NEXT: ret void
 //
 void write_global_i64(svint64_t v) { global_i64 = v; }
@@ -36,33 +36,33 @@
 // CHECK-128-LABEL: @write_global_bf16(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x bfloat> @llvm.experimental.vector.extract.v8bf16.nxv8bf16(<vscale x 8 x bfloat> [[V:%.*]], i64 0)
-// CHECK-128-NEXT: store <8 x bfloat> [[CASTFIXEDSVE]], <8 x bfloat>* @global_bf16, align 16, [[TBAA6]]
+// CHECK-128-NEXT: store <8 x bfloat> [[CASTFIXEDSVE]], <8 x bfloat>* @global_bf16, align 16, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_global_bf16(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = call <32 x bfloat> @llvm.experimental.vector.extract.v32bf16.nxv8bf16(<vscale x 8 x bfloat> [[V:%.*]], i64 0)
-// CHECK-512-NEXT: store <32 x bfloat> [[CASTFIXEDSVE]], <32 x bfloat>* @global_bf16, align 16, [[TBAA6]]
+// CHECK-512-NEXT: store <32 x bfloat> [[CASTFIXEDSVE]], <32 x bfloat>* @global_bf16, align 16, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: ret void
 //
 void write_global_bf16(svbfloat16_t v) { global_bf16 = v; }
 // CHECK-128-LABEL: @write_global_bool(
 // CHECK-128-NEXT: entry:
-// CHECK-128-NEXT: [[V_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-128-NEXT: store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[V_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
-// CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[V_ADDR]] to <2 x i8>*
-// CHECK-128-NEXT: [[TMP1:%.*]] = load <2 x i8>, <2 x i8>* [[TMP0]], align 16, [[TBAA6]]
-// CHECK-128-NEXT: store <2 x i8> [[TMP1]], <2 x i8>* @global_bool, align 2, [[TBAA6]]
+// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
+// CHECK-128-NEXT: store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <2 x i8>*
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: store <2 x i8> [[TMP0]], <2 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_global_bool(
 // CHECK-512-NEXT: entry:
-// CHECK-512-NEXT: [[V_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-512-NEXT: store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[V_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
-// CHECK-512-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[V_ADDR]] to <8 x i8>*
-// CHECK-512-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 16, [[TBAA6]]
-// CHECK-512-NEXT: store <8 x i8> [[TMP1]], <8 x i8>* @global_bool, align 2, [[TBAA6]]
+// CHECK-512-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
+// CHECK-512-NEXT: store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <8 x i8>*
+// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-512-NEXT: store <8 x i8> [[TMP0]], <8 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: ret void
 //
 void write_global_bool(svbool_t v) { global_bool = v; }
@@ -73,13 +73,13 @@
 // CHECK-128-LABEL: @read_global_i64(
 // CHECK-128-NEXT: entry:
-// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i64>, <2 x i64>* @global_i64, align 16, [[TBAA6]]
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i64>, <2 x i64>* @global_i64, align 16, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP0]], i64 0)
 // CHECK-128-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
 //
 // CHECK-512-LABEL: @read_global_i64(
 // CHECK-512-NEXT: entry:
-// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* @global_i64, align 16, [[TBAA6]]
+// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i64>, <8 x i64>* @global_i64, align 16, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[TMP0]], i64 0)
 // CHECK-512-NEXT: ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
 //
@@ -87,13 +87,13 @@
 // CHECK-128-LABEL: @read_global_bf16(
 // CHECK-128-NEXT: entry:
-// CHECK-128-NEXT: [[TMP0:%.*]] = load <8 x bfloat>, <8 x bfloat>* @global_bf16, align 16, [[TBAA6]]
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <8 x bfloat>, <8 x bfloat>* @global_bf16, align 16, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> [[TMP0]], i64 0)
 // CHECK-128-NEXT: ret <vscale x 8 x bfloat> [[CASTSCALABLESVE]]
 //
 // CHECK-512-LABEL: @read_global_bf16(
 // CHECK-512-NEXT: entry:
-// CHECK-512-NEXT: [[TMP0:%.*]] = load <32 x bfloat>, <32 x bfloat>* @global_bf16, align 16, [[TBAA6]]
+// CHECK-512-NEXT: [[TMP0:%.*]] = load <32 x bfloat>, <32 x bfloat>* @global_bf16, align 16, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x bfloat> @llvm.experimental.vector.insert.nxv8bf16.v32bf16(<vscale x 8 x bfloat> undef, <32 x bfloat> [[TMP0]], i64 0)
 // CHECK-512-NEXT: ret <vscale x 8 x bfloat> [[CASTSCALABLESVE]]
 //
@@ -101,12 +101,20 @@
 // CHECK-128-LABEL: @read_global_bool(
 // CHECK-128-NEXT: entry:
-// CHECK-128-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* bitcast (<2 x i8>* @global_bool to <vscale x 16 x i1>*), align 2, [[TBAA6]]
-// CHECK-128-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <2 x i8>, align 16
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: store <2 x i8> [[TMP0]], <2 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <2 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: ret <vscale x 16 x i1> [[TMP1]]
 //
 // CHECK-512-LABEL: @read_global_bool(
 // CHECK-512-NEXT: entry:
-// CHECK-512-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* bitcast (<8 x i8>* @global_bool to <vscale x 16 x i1>*), align 2, [[TBAA6]]
-// CHECK-512-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+// CHECK-512-NEXT: [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 16
+// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
+// CHECK-512-NEXT: store <8 x i8> [[TMP0]], <8 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
+// CHECK-512-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-512-NEXT: ret <vscale x 16 x i1> [[TMP1]]
 //
 svbool_t read_global_bool() { return global_bool; }
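
Note: the new test_cast coverage corresponds to the source-level pattern below, where the operand of the fixed-to-scalable conversion is a vector rvalue (xx + yy) rather than an lvalue or a call expression, which is exactly the case the removed EmitLValue(E) path could not handle. This is a minimal standalone sketch, assuming the -msve-vector-bits RUN configuration these tests use; the typedefs, globals and the function name are illustrative (mirroring the test file), not part of the patch itself.

#include <arm_sve.h>

#define N __ARM_FEATURE_SVE_BITS
typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));

fixed_bool_t global_pred;
fixed_int32_t global_vec;

fixed_int32_t cast_of_rvalue(svbool_t pred, svint32_t vec) {
  fixed_int32_t xx = {1, 2, 3, 4};
  fixed_int32_t yy = {2, 5, 4, 6};
  /* The third argument is a fixed-length vector rvalue; with this patch its
     conversion to the sizeless svbool_t parameter is emitted through a
     "saved-value" stack temporary (store + bitcast + load) instead of
     assuming the operand has an address. */
  svbool_t pg = svand_z(pred, global_pred, xx + yy);
  return svadd_m(pg, global_vec, vec);
}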