diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq.c
--- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq.c
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_dupq.c
@@ -568,7 +568,7 @@
 // CHECK-NEXT: [[TMP16:%.*]] = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
 // CHECK-NEXT: [[TMP17:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0)
 // CHECK-NEXT: [[TMP18:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP17]], i64 0)
-// CHECK-NEXT: [[TMP19:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv16i8( [[TMP16]], [[TMP18]], shufflevector ( insertelement ( poison, i64 0, i32 0), poison, zeroinitializer))
+// CHECK-NEXT: [[TMP19:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv16i8( [[TMP16]], [[TMP18]], zeroinitializer)
 // CHECK-NEXT: ret [[TMP19]]
 //
 // CPP-CHECK-LABEL: @_Z16test_svdupq_n_b8bbbbbbbbbbbbbbbb(
@@ -608,7 +608,7 @@
 // CPP-CHECK-NEXT: [[TMP16:%.*]] = call @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
 // CPP-CHECK-NEXT: [[TMP17:%.*]] = call @llvm.experimental.vector.insert.nxv16i8.v16i8( undef, <16 x i8> [[TMP15]], i64 0)
 // CPP-CHECK-NEXT: [[TMP18:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv16i8( [[TMP17]], i64 0)
-// CPP-CHECK-NEXT: [[TMP19:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv16i8( [[TMP16]], [[TMP18]], shufflevector ( insertelement ( poison, i64 0, i32 0), poison, zeroinitializer))
+// CPP-CHECK-NEXT: [[TMP19:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv16i8( [[TMP16]], [[TMP18]], zeroinitializer)
 // CPP-CHECK-NEXT: ret [[TMP19]]
 //
 svbool_t test_svdupq_n_b8(bool x0, bool x1, bool x2, bool x3,
@@ -641,7 +641,7 @@
 // CHECK-NEXT: [[TMP16:%.*]] = call @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
 // CHECK-NEXT: [[TMP17:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP15]], i64 0)
 // CHECK-NEXT: [[TMP18:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP17]], i64 0)
-// CHECK-NEXT: [[TMP19:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv8i16( [[TMP16]], [[TMP18]], shufflevector ( insertelement ( poison, i64 0, i32 0), poison, zeroinitializer))
+// CHECK-NEXT: [[TMP19:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv8i16( [[TMP16]], [[TMP18]], zeroinitializer)
 // CHECK-NEXT: [[TMP20:%.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv8i1( [[TMP19]])
 // CHECK-NEXT: ret [[TMP20]]
 //
@@ -666,7 +666,7 @@
 // CPP-CHECK-NEXT: [[TMP16:%.*]] = call @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
 // CPP-CHECK-NEXT: [[TMP17:%.*]] = call @llvm.experimental.vector.insert.nxv8i16.v8i16( undef, <8 x i16> [[TMP15]], i64 0)
 // CPP-CHECK-NEXT: [[TMP18:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv8i16( [[TMP17]], i64 0)
-// CPP-CHECK-NEXT: [[TMP19:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv8i16( [[TMP16]], [[TMP18]], shufflevector ( insertelement ( poison, i64 0, i32 0), poison, zeroinitializer))
+// CPP-CHECK-NEXT: [[TMP19:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv8i16( [[TMP16]], [[TMP18]], zeroinitializer)
 // CPP-CHECK-NEXT: [[TMP20:%.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv8i1( [[TMP19]])
 // CPP-CHECK-NEXT: ret [[TMP20]]
 //
@@ -690,7 +690,7 @@
 // CHECK-NEXT: [[TMP8:%.*]] = call @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
 // CHECK-NEXT: [[TMP9:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP7]], i64 0)
 // CHECK-NEXT: [[TMP10:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP9]], i64 0)
-// CHECK-NEXT: [[TMP11:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv4i32( [[TMP8]], [[TMP10]], shufflevector ( insertelement ( poison, i64 0, i32 0), poison, zeroinitializer))
+// CHECK-NEXT: [[TMP11:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv4i32( [[TMP8]], [[TMP10]], zeroinitializer)
 // CHECK-NEXT: [[TMP12:%.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv4i1( [[TMP11]])
 // CHECK-NEXT: ret [[TMP12]]
 //
@@ -707,7 +707,7 @@
 // CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
 // CPP-CHECK-NEXT: [[TMP9:%.*]] = call @llvm.experimental.vector.insert.nxv4i32.v4i32( undef, <4 x i32> [[TMP7]], i64 0)
 // CPP-CHECK-NEXT: [[TMP10:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv4i32( [[TMP9]], i64 0)
-// CPP-CHECK-NEXT: [[TMP11:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv4i32( [[TMP8]], [[TMP10]], shufflevector ( insertelement ( poison, i64 0, i32 0), poison, zeroinitializer))
+// CPP-CHECK-NEXT: [[TMP11:%.*]] = call @llvm.aarch64.sve.cmpne.wide.nxv4i32( [[TMP8]], [[TMP10]], zeroinitializer)
 // CPP-CHECK-NEXT: [[TMP12:%.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv4i1( [[TMP11]])
 // CPP-CHECK-NEXT: ret [[TMP12]]
 //
@@ -726,7 +726,7 @@
 // CHECK-NEXT: [[TMP4:%.*]] = call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
 // CHECK-NEXT: [[TMP5:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP3]], i64 0)
 // CHECK-NEXT: [[TMP6:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP5]], i64 0)
-// CHECK-NEXT: [[TMP7:%.*]] = call @llvm.aarch64.sve.cmpne.nxv2i64( [[TMP4]], [[TMP6]], shufflevector ( insertelement ( poison, i64 0, i32 0), poison, zeroinitializer))
+// CHECK-NEXT: [[TMP7:%.*]] = call @llvm.aarch64.sve.cmpne.nxv2i64( [[TMP4]], [[TMP6]], zeroinitializer)
 // CHECK-NEXT: [[TMP8:%.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv2i1( [[TMP7]])
 // CHECK-NEXT: ret [[TMP8]]
 //
@@ -739,7 +739,7 @@
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = call @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = call @llvm.experimental.vector.insert.nxv2i64.v2i64( undef, <2 x i64> [[TMP3]], i64 0)
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = call @llvm.aarch64.sve.dupq.lane.nxv2i64( [[TMP5]], i64 0)
-// CPP-CHECK-NEXT: [[TMP7:%.*]] = call @llvm.aarch64.sve.cmpne.nxv2i64( [[TMP4]], [[TMP6]], shufflevector ( insertelement ( poison, i64 0, i32 0), poison, zeroinitializer))
+// CPP-CHECK-NEXT: [[TMP7:%.*]] = call @llvm.aarch64.sve.cmpne.nxv2i64( [[TMP4]], [[TMP6]], zeroinitializer)
 // CPP-CHECK-NEXT: [[TMP8:%.*]] = call @llvm.aarch64.sve.convert.to.svbool.nxv2i1( [[TMP7]])
 // CPP-CHECK-NEXT: ret [[TMP8]]
 //
diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -731,12 +731,16 @@
   // If the mask is all zeros this is a splat, no need to go through all
   // elements.
-  if (all_of(Mask, [](int Elt) { return Elt == 0; }) &&
-      !MaskEltCount.isScalable()) {
+  if (all_of(Mask, [](int Elt) { return Elt == 0; })) {
     Type *Ty = IntegerType::get(V1->getContext(), 32);
     Constant *Elt = ConstantExpr::getExtractElement(V1, ConstantInt::get(Ty, 0));
-    return ConstantVector::getSplat(MaskEltCount, Elt);
+
+    if (Elt->isNullValue()) {
+      auto *VTy = VectorType::get(EltTy, MaskEltCount);
+      return ConstantAggregateZero::get(VTy);
+    } else if (!MaskEltCount.isScalable())
+      return ConstantVector::getSplat(MaskEltCount, Elt);
   }
   // Do not iterate on scalable vector. The num of elements is unknown at
   // compile-time.
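Illustration (not part of the patch, modeled on the vscale-round-trip.ll checks updated below; the function name @fold_zero_splat and the exact vector type are picked only for this sketch): with the ConstantFold.cpp change above, a constant shufflevector whose source element 0 is null and whose mask is all zeros now folds to a plain zeroinitializer.

  define <vscale x 4 x i32> @fold_zero_splat() {
    ; An all-zero mask splats element 0 of the first operand; since that element
    ; folds to a null value here, the whole expression becomes ConstantAggregateZero.
    ret <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> zeroinitializer,
                                          <vscale x 4 x i32> poison,
                                          <vscale x 4 x i32> zeroinitializer)
  }
  ; After the fold, the returned constant prints as:
  ;   ret <vscale x 4 x i32> zeroinitializer

The other paths are unchanged: an all-zero-mask splat of a non-null element still goes through ConstantVector::getSplat for fixed-width vectors, and such splats are still left unfolded for scalable vectors.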
diff --git a/llvm/test/Bitcode/vscale-round-trip.ll b/llvm/test/Bitcode/vscale-round-trip.ll
--- a/llvm/test/Bitcode/vscale-round-trip.ll
+++ b/llvm/test/Bitcode/vscale-round-trip.ll
@@ -6,7 +6,7 @@
 @important_val = extern_weak dso_local global i32, align 4
 ; CHECK-LABEL: define @const_shufflevector(
-; CHECK: shufflevector (
+; CHECK: zeroinitializer
 define @const_shufflevector() {
   ret shufflevector ( zeroinitializer,
@@ -15,7 +15,7 @@
 }
 ; CHECK-LABEL: define @const_shufflevector_ex()
-; CHECK: shufflevector (
+; CHECK: zeroinitializer
 define @const_shufflevector_ex() {
   ret shufflevector ( zeroinitializer,
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
--- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
@@ -121,9 +121,9 @@
 ; CHECK-ORDERED: %[[LOAD2:.*]] = load float, float* %[[ARRAYIDX]]
 ; CHECK-ORDERED: vector.ph
 ; CHECK-ORDERED: %[[STEPVEC1:.*]] = call @llvm.experimental.stepvector.nxv4i64()
-; CHECK-ORDERED: %[[STEPVEC_ADD1:.*]] = add %[[STEPVEC1]], shufflevector ( insertelement ( poison, i64 0, i32 0), poison, zeroinitializer)
+; CHECK-ORDERED: %[[STEPVEC_ADD1:.*]] = add %[[STEPVEC1]], zeroinitializer
 ; CHECK-ORDERED: %[[STEPVEC_MUL:.*]] = mul %[[STEPVEC_ADD1]], shufflevector ( insertelement ( poison, i64 2, i32 0), poison, zeroinitializer)
-; CHECK-ORDERED: %[[INDUCTION:.*]] = add shufflevector ( insertelement ( poison, i64 0, i32 0), poison, zeroinitializer), %[[STEPVEC_MUL]]
+; CHECK-ORDERED: %[[INDUCTION:.*]] = add zeroinitializer, %[[STEPVEC_MUL]]
 ; CHECK-ORDERED: vector.body
 ; CHECK-ORDERED: %[[VEC_PHI2:.*]] = phi float [ %[[LOAD2]], %vector.ph ], [ %[[RDX2:.*]], %vector.body ]
 ; CHECK-ORDERED: %[[VEC_PHI1:.*]] = phi float [ %[[LOAD1]], %vector.ph ], [ %[[RDX1:.*]], %vector.body ]
@@ -147,9 +147,9 @@
 ; CHECK-UNORDERED: %[[INS_ELT2:.*]] = insertelement shufflevector ( insertelement ( poison, float -0.000000e+00, i32 0), poison, zeroinitializer), float %[[LOAD2]], i32 0
 ; CHECK-UNORDERED: %[[INS_ELT1:.*]] = insertelement shufflevector ( insertelement ( poison, float -0.000000e+00, i32 0), poison, zeroinitializer), float %[[LOAD1]], i32 0
 ; CHECK-UNORDERED: %[[STEPVEC1:.*]] = call @llvm.experimental.stepvector.nxv4i64()
-; CHECK-UNORDERED: %[[STEPVEC_ADD1:.*]] = add %[[STEPVEC1]], shufflevector ( insertelement ( poison, i64 0, i32 0), poison, zeroinitializer)
+; CHECK-UNORDERED: %[[STEPVEC_ADD1:.*]] = add %[[STEPVEC1]], zeroinitializer
 ; CHECK-UNORDERED: %[[STEPVEC_MUL:.*]] = mul %[[STEPVEC_ADD1]], shufflevector ( insertelement ( poison, i64 2, i32 0), poison, zeroinitializer)
-; CHECK-UNORDERED: %[[INDUCTION:.*]] = add shufflevector ( insertelement ( poison, i64 0, i32 0), poison, zeroinitializer), %[[STEPVEC_MUL]]
+; CHECK-UNORDERED: %[[INDUCTION:.*]] = add zeroinitializer, %[[STEPVEC_MUL]]
 ; CHECK-UNORDERED: vector.body
 ; CHECK-UNORDERED: %[[VEC_PHI2:.*]] = phi [ %[[INS_ELT2]], %vector.ph ], [ %[[VEC_FADD2:.*]], %vector.body ]
 ; CHECK-UNORDERED: %[[VEC_PHI1:.*]] = phi [ %[[INS_ELT1]], %vector.ph ], [ %[[VEC_FADD1:.*]], %vector.body ]
@@ -273,7 +273,7 @@
 ; CHECK-ORDERED: vector.body
 ; CHECK-ORDERED: %[[VEC_PHI:.*]] = phi float [ 1.000000e+00, %vector.ph ], [ %[[RDX:.*]], %vector.body ]
 ; CHECK-ORDERED: %[[LOAD:.*]] = load , *
-; CHECK-ORDERED: %[[FCMP:.*]] = fcmp une %[[LOAD]], shufflevector ( insertelement ( poison, float 0.000000e+00, i32 0), poison, zeroinitializer)
+; CHECK-ORDERED: %[[FCMP:.*]] = fcmp une %[[LOAD]], zeroinitializer
 ; CHECK-ORDERED: %[[MASKED_LOAD:.*]] = call @llvm.masked.load.nxv4f32.p0nxv4f32(* {{.*}}, i32 4, %[[FCMP]], poison)
 ; CHECK-ORDERED: %[[XOR:.*]] = xor %[[FCMP]], shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer)
 ; CHECK-ORDERED: %[[SELECT:.*]] = select %[[XOR]], shufflevector ( insertelement ( poison, float 3.000000e+00, i32 0), poison, zeroinitializer), %[[MASKED_LOAD]]
@@ -295,7 +295,7 @@
 ; CHECK-UNORDERED: vector.body
 ; CHECK-UNORDERED: %[[VEC_PHI:.*]] = phi [ insertelement ( shufflevector ( insertelement ( poison, float -0.000000e+00, i32 0), poison, zeroinitializer), float 1.000000e+00, i32 0), %vector.ph ], [ %[[VEC_FADD:.*]], %vector.body ]
 ; CHECK-UNORDERED: %[[LOAD1:.*]] = load , *
-; CHECK-UNORDERED: %[[FCMP:.*]] = fcmp une %[[LOAD1]], shufflevector ( insertelement ( poison, float 0.000000e+00, i32 0), poison, zeroinitializer)
+; CHECK-UNORDERED: %[[FCMP:.*]] = fcmp une %[[LOAD1]], zeroinitializer
 ; CHECK-UNORDERED: %[[MASKED_LOAD:.*]] = call @llvm.masked.load.nxv4f32.p0nxv4f32(* {{.*}}, i32 4, %[[FCMP]], poison)
 ; CHECK-UNORDERED: %[[XOR:.*]] = xor %[[FCMP]], shufflevector ( insertelement ( poison, i1 true, i32 0), poison, zeroinitializer)
 ; CHECK-UNORDERED: %[[SELECT:.*]] = select %[[XOR]], shufflevector ( insertelement ( poison, float 3.000000e+00, i32 0), poison, zeroinitializer), %[[MASKED_LOAD]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-basic-vec.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-basic-vec.ll
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-basic-vec.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-basic-vec.ll
@@ -9,7 +9,7 @@
 ; CHECK-NEXT: entry:
 ; CHECK: vector.body:
 ; CHECK: [[WIDE_LOAD:%.*]] = load , * {{.*}}, align 4
-; CHECK-NEXT: [[TMP1:%.*]] = icmp eq [[WIDE_LOAD]], shufflevector ( insertelement ( poison, i32 0, i32 0), poison, zeroinitializer)
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq [[WIDE_LOAD]], zeroinitializer
 ; CHECK-NEXT: [[TMP2:%.*]] = select [[TMP1]], shufflevector ( insertelement ( poison, i32 2, i32 0), poison, zeroinitializer), shufflevector ( insertelement ( poison, i32 10, i32 0), poison, zeroinitializer)
 ; CHECK: store [[TMP2]], * {{.*}}, align 4
 ;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll
@@ -83,7 +83,7 @@
 ; CHECK-NEXT: %[[SPLATINS:.*]] = insertelement poison, i32* %[[GEP]], i32 0
 ; CHECK-NEXT: %[[SPLAT:.*]] = shufflevector %[[SPLATINS]], poison, zeroinitializer
 ; CHECK: %[[LOAD:.*]] = load , *
-; CHECK-NEXT: %[[ICMP:.*]] = icmp ne %[[LOAD]], shufflevector ( insertelement ( poison, i32 0, i32 0), poison, zeroinitializer)
+; CHECK-NEXT: %[[ICMP:.*]] = icmp ne %[[LOAD]], zeroinitializer
 ; CHECK: %[[MASKED_LOAD:.*]] = call @llvm.masked.load.nxv4i32.p0nxv4i32(* %[[BITCAST:.*]], i32 4, %[[ICMP]], poison)
 ; CHECK-NEXT: %[[MASKED_GATHER:.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0i32( %[[SPLAT]], i32 4, %[[ICMP]], undef)
 ; CHECK-NEXT: %[[ADD:.*]] = add nsw %[[MASKED_GATHER]], %[[MASKED_LOAD]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-store.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-store.ll
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inv-store.ll
@@ -57,7 +57,7 @@
 ; CHECK-NEXT: %[[SPLAT_PTRS:.*]] = shufflevector %[[TMP1]], poison, zeroinitializer
 ; CHECK: vector.body:
 ; CHECK: %[[VECLOAD:.*]] = load , * %{{.*}}, align 4
-; CHECK-NEXT: %[[MASK:.*]] = icmp sgt %[[VECLOAD]], shufflevector ( insertelement ( poison, i32 0, i32 0), poison, zeroinitializer)
+; CHECK-NEXT: %[[MASK:.*]] = icmp sgt %[[VECLOAD]], zeroinitializer
 ; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0i32( %[[VECLOAD]], %[[SPLAT_PTRS]], i32 4, %[[MASK]])
 entry:
   br label %for.body
@@ -104,7 +104,7 @@
 ; CHECK-NEXT: [[TMP5:%.*]] = call @llvm.experimental.stepvector.nxv2i64()
 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i64 [[INDEX]], i32 0
 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer
-; CHECK-NEXT: [[TMP6:%.*]] = add shufflevector ( insertelement ( poison, i64 0, i32 0), poison, zeroinitializer), [[TMP5]]
+; CHECK-NEXT: [[TMP6:%.*]] = add zeroinitializer, [[TMP5]]
 ; CHECK-NEXT: [[TMP7:%.*]] = add [[DOTSPLAT]], [[TMP6]]
 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i64, i64* [[START]], [[TMP7]]
 ; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 0
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-select-cmp.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-select-cmp.ll
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-select-cmp.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-select-cmp.ll
@@ -154,7 +154,7 @@
 define i32 @pred_select_const_i32_from_icmp(i32* noalias nocapture readonly %src1, i32* noalias nocapture readonly %src2, i64 %n) #0 {
 ; CHECK-VF4IC1-LABEL: @pred_select_const_i32_from_icmp
 ; CHECK-VF4IC1: vector.body:
-; CHECK-VF4IC1: [[VEC_PHI:%.*]] = phi [ shufflevector ( insertelement ( poison, i32 0, i32 0), poison, zeroinitializer), %vector.ph ], [ [[VEC_SEL:%.*]], %vector.body ]
+; CHECK-VF4IC1: [[VEC_PHI:%.*]] = phi [ zeroinitializer, %vector.ph ], [ [[VEC_SEL:%.*]], %vector.body ]
 ; CHECK-VF4IC1: [[VEC_LOAD:%.*]] = load
 ; CHECK-VF4IC1: [[MASK:%.*]] = icmp sgt [[VEC_LOAD]], shufflevector ( insertelement ( poison, i32 35, i32 0), poison, zeroinitializer)
 ; CHECK-VF4IC1: [[MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4i32.p0nxv4i32(* {{%.*}}, i32 4, [[MASK]], poison)
@@ -162,7 +162,7 @@
 ; CHECK-VF4IC1-NEXT: [[VEC_SEL_TMP:%.*]] = select [[VEC_ICMP]], shufflevector ( insertelement ( poison, i32 1, i32 0), poison, zeroinitializer), [[VEC_PHI]]
 ; CHECK-VF4IC1: [[VEC_SEL:%.*]] = select [[MASK]], [[VEC_SEL_TMP]], [[VEC_PHI]]
 ; CHECK-VF4IC1: middle.block:
-; CHECK-VF4IC1-NEXT: [[FIN_ICMP:%.*]] = icmp ne [[VEC_SEL]], shufflevector ( insertelement ( poison, i32 0, i32 0), poison, zeroinitializer)
+; CHECK-VF4IC1-NEXT: [[FIN_ICMP:%.*]] = icmp ne [[VEC_SEL]], zeroinitializer
 ; CHECK-VF4IC1-NEXT: [[OR_RDX:%.*]] = call i1 @llvm.vector.reduce.or.nxv4i1( [[FIN_ICMP]])
 ; CHECK-VF4IC1-NEXT: {{.*}} = select i1 [[OR_RDX]], i32 1, i32 0
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
@@ -47,7 +47,7 @@
 ; CHECK-NEXT: [[TMP6:%.*]] = call @llvm.experimental.stepvector.nxv2i64()
 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i64 [[INDEX]], i32 0
 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer
-; CHECK-NEXT: [[TMP7:%.*]] = add shufflevector ( insertelement ( poison, i64 0, i32 0), poison, zeroinitializer), [[TMP6]]
+; CHECK-NEXT: [[TMP7:%.*]] = add zeroinitializer, [[TMP6]]
 ; CHECK-NEXT: [[TMP8:%.*]] = add [[DOTSPLAT]], [[TMP7]]
 ; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, i8* [[START_2]], [[TMP8]]
 ; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 0
@@ -126,7 +126,7 @@
 ; CHECK-NEXT: [[TMP5:%.*]] = call @llvm.experimental.stepvector.nxv2i64()
 ; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i64 [[INDEX1]], i32 0
 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer
-; CHECK-NEXT: [[TMP6:%.*]] = add shufflevector ( insertelement ( poison, i64 0, i32 0), poison, zeroinitializer), [[TMP5]]
+; CHECK-NEXT: [[TMP6:%.*]] = add zeroinitializer, [[TMP5]]
 ; CHECK-NEXT: [[TMP7:%.*]] = add [[DOTSPLAT]], [[TMP6]]
 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, i8* [[START]], [[TMP7]]
 ; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX1]], 0
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll b/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll
--- a/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll
@@ -143,7 +143,7 @@
 ; CHECK: %[[STEPVEC:.*]] = call @llvm.experimental.stepvector.nxv4i32()
 ; CHECK-NEXT: %[[TMP1:.*]] = uitofp %[[STEPVEC]] to
 ; CHECK-NEXT: %[[TMP2:.*]] = fmul %[[TMP1]], shufflevector ( insertelement ( poison, float 2.000000e+00, i32 0), poison, zeroinitializer)
-; CHECK-NEXT: %[[INDINIT:.*]] = fadd %[[TMP2]], shufflevector ( insertelement ( poison, float 0.000000e+00, i32 0), poison, zeroinitializer)
+; CHECK-NEXT: %[[INDINIT:.*]] = fadd %[[TMP2]], zeroinitializer
 ; CHECK-NEXT: %[[VSCALE:.*]] = call i32 @llvm.vscale.i32()
 ; CHECK-NEXT: %[[TMP3:.*]] = shl i32 %8, 2
 ; CHECK-NEXT: %[[TMP4:.*]] = uitofp i32 %[[TMP3]] to float
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/scalable-reduction-inloop.ll
--- a/llvm/test/Transforms/LoopVectorize/scalable-reduction-inloop.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-reduction-inloop.ll
@@ -7,8 +7,8 @@
 ; CHECK-LABEL: @reduction_add_trunc(
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi [ insertelement ( shufflevector ( insertelement ( poison, i32 0, i32 0), poison, zeroinitializer), i32 255, i32 0), %vector.ph ], [ [[TMP34:%.*]], %vector.body ]
-; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi [ shufflevector ( insertelement ( poison, i32 0, i32 0), poison, zeroinitializer), %vector.ph ], [ [[TMP36:%.*]], %vector.body ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi [ insertelement ( zeroinitializer, i32 255, i32 0), %vector.ph ], [ [[TMP34:%.*]], %vector.body ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi [ zeroinitializer, %vector.ph ], [ [[TMP36:%.*]], %vector.body ]
 ; CHECK: [[TMP14:%.*]] = and [[VEC_PHI]], shufflevector ( insertelement ( poison, i32 255, i32 0), poison, zeroinitializer)
 ; CHECK-NEXT: [[TMP15:%.*]] = and [[VEC_PHI1]], shufflevector ( insertelement ( poison, i32 255, i32 0), poison, zeroinitializer)
 ; CHECK: [[WIDE_LOAD:%.*]] = load , *