diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -11337,14 +11337,47 @@
       }
     }
     if (VectorizedTree) {
+      // Reorder operands of bool logical op in the natural order to avoid
+      // possible problem with poison propagation. If not possible to reorder
+      // (both operands are originally RHS), emit an extra freeze instruction
+      // for the LHS operand.
+      // I.e., if we have original code like this:
+      //  RedOp1 = select i1 ?, i1 LHS, i1 false
+      //  RedOp2 = select i1 RHS, i1 ?, i1 false
+
+      // Then, we swap LHS/RHS to create a new op that matches the poison
+      // semantics of the original code.
+
+      // If we have original code like this and both values could be poison:
+      //  RedOp1 = select i1 ?, i1 LHS, i1 false
+      //  RedOp2 = select i1 ?, i1 RHS, i1 false
+
+      // Then, we must freeze LHS in the new op.
+      auto &&FixBoolLogicalOps = [&Builder, VectorizedTree](
+                                     Value *&LHS, Value *&RHS,
+                                     Instruction *RedOp1, Instruction *RedOp2) {
+        if (!isa<SelectInst>(RedOp1) || !isBoolLogicOp(RedOp1))
+          return;
+        if (LHS == VectorizedTree || getRdxOperand(RedOp1, 0) == LHS ||
+            isGuaranteedNotToBePoison(LHS))
+          return;
+        if (!isa<SelectInst>(RedOp2) || !isBoolLogicOp(RedOp2))
+          return;
+        if (RHS == VectorizedTree || getRdxOperand(RedOp2, 0) == RHS ||
+            isGuaranteedNotToBePoison(RHS)) {
+          std::swap(LHS, RHS);
+          return;
+        }
+        LHS = Builder.CreateFreeze(LHS);
+      };
       // Finish the reduction.
       // Need to add extra arguments and not vectorized possible reduction
       // values.
       // Try to avoid dependencies between the scalar remainders after
       // reductions.
       auto &&FinalGen =
-          [this, &Builder,
-           &TrackedVals](ArrayRef<std::pair<Instruction *, Value *>> InstVals) {
+          [this, &Builder, &TrackedVals, &FixBoolLogicalOps](
+              ArrayRef<std::pair<Instruction *, Value *>> InstVals) {
             unsigned Sz = InstVals.size();
             SmallVector<std::pair<Instruction *, Value *>> ExtraReds(Sz / 2 +
                                                                      Sz % 2);
@@ -11361,6 +11394,11 @@
               auto It2 = TrackedVals.find(RdxVal2);
               if (It2 != TrackedVals.end())
                 StableRdxVal2 = It2->second;
+              // To prevent poison from leaking across what used to be
+              // sequential, safe, scalar boolean logic operations, the
+              // reduction operand must be frozen.
+              FixBoolLogicalOps(StableRdxVal1, StableRdxVal2, InstVals[I].first,
+                                RedOp);
               Value *ExtraRed = createOp(Builder, RdxKind, StableRdxVal1,
                                          StableRdxVal2, "op.rdx", ReductionOps);
               ExtraReds[I / 2] = std::make_pair(InstVals[I].first, ExtraRed);
@@ -11370,6 +11408,11 @@
             return ExtraReds;
           };
       SmallVector<std::pair<Instruction *, Value *>> ExtraReductions;
+      bool IsBoolLogicalOp = isa<SelectInst>(ReductionRoot) &&
+                             isBoolLogicOp(cast<Instruction>(ReductionRoot));
+      if (IsBoolLogicalOp)
+        ExtraReductions.emplace_back(cast<Instruction>(ReductionRoot),
+                                     VectorizedTree);
       SmallPtrSet<Value *, 4> Visited;
       for (ArrayRef<Value *> Candidates : ReducedVals) {
         for (Value *RdxVal : Candidates) {
@@ -11387,24 +11430,17 @@
         for (auto *I : Pair.second)
           ExtraReductions.emplace_back(I, Pair.first);
       }
+      if (!IsBoolLogicalOp)
+        ExtraReductions.emplace_back(cast<Instruction>(ReductionRoot),
+                                     VectorizedTree);
       // Iterate through all not-vectorized reduction values/extra arguments.
       while (ExtraReductions.size() > 1) {
+        VectorizedTree = ExtraReductions.front().second;
         SmallVector<std::pair<Instruction *, Value *>> NewReds =
             FinalGen(ExtraReductions);
         ExtraReductions.swap(NewReds);
       }
-      // Final reduction.
-      if (ExtraReductions.size() == 1) {
-        Instruction *RedOp = ExtraReductions.back().first;
-        Builder.SetCurrentDebugLocation(RedOp->getDebugLoc());
-        Value *RdxVal = ExtraReductions.back().second;
-        Value *StableRdxVal = RdxVal;
-        auto It = TrackedVals.find(RdxVal);
-        if (It != TrackedVals.end())
-          StableRdxVal = It->second;
-        VectorizedTree = createOp(Builder, RdxKind, VectorizedTree,
-                                  StableRdxVal, "op.rdx", ReductionOps);
-      }
+      VectorizedTree = ExtraReductions.front().second;

       ReductionRoot->replaceAllUsesWith(VectorizedTree);
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/gather-root.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/gather-root.ll
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/gather-root.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/gather-root.ll
@@ -18,7 +18,7 @@
 ; DEFAULT-NEXT:    [[P17:%.*]] = phi i32 [ [[OP_RDX:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
 ; DEFAULT-NEXT:    [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i32> , <8 x i32>
 ; DEFAULT-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP2]])
-; DEFAULT-NEXT:    [[OP_RDX]] = add i32 [[TMP3]], [[P17]]
+; DEFAULT-NEXT:    [[OP_RDX]] = add i32 [[P17]], [[TMP3]]
 ; DEFAULT-NEXT:    br label [[FOR_BODY]]
 ;
 ; GATHER-LABEL: @PR28330(
@@ -30,7 +30,7 @@
 ; GATHER-NEXT:    [[P17:%.*]] = phi i32 [ [[OP_RDX:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
 ; GATHER-NEXT:    [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i32> , <8 x i32>
 ; GATHER-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP2]])
-; GATHER-NEXT:    [[OP_RDX]] = add i32 [[TMP3]], [[P17]]
+; GATHER-NEXT:    [[OP_RDX]] = add i32 [[P17]], [[TMP3]]
 ; GATHER-NEXT:    br label [[FOR_BODY]]
 ;
 ; MAX-COST-LABEL: @PR28330(
@@ -42,7 +42,7 @@
 ; MAX-COST-NEXT:    [[P17:%.*]] = phi i32 [ [[OP_RDX:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
 ; MAX-COST-NEXT:    [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i32> , <8 x i32>
 ; MAX-COST-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP2]])
-; MAX-COST-NEXT:    [[OP_RDX]] = add i32 [[TMP3]], [[P17]]
+; MAX-COST-NEXT:    [[OP_RDX]] = add i32 [[P17]], [[TMP3]]
 ; MAX-COST-NEXT:    br label [[FOR_BODY]]
 ;
 entry:
@@ -95,7 +95,7 @@
 ; DEFAULT-NEXT:    [[P17:%.*]] = phi i32 [ [[OP_RDX:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
 ; DEFAULT-NEXT:    [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i32> , <8 x i32>
 ; DEFAULT-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP2]])
-; DEFAULT-NEXT:    [[OP_RDX]] = add i32 [[TMP3]], -5
+; DEFAULT-NEXT:    [[OP_RDX]] = add i32 -5, [[TMP3]]
 ; DEFAULT-NEXT:    br label [[FOR_BODY]]
 ;
 ; GATHER-LABEL: @PR32038(
@@ -107,7 +107,7 @@
 ; GATHER-NEXT:    [[P17:%.*]] = phi i32 [ [[OP_RDX:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
 ; GATHER-NEXT:    [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i32> , <8 x i32>
 ; GATHER-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP2]])
-; GATHER-NEXT:    [[OP_RDX]] = add i32 [[TMP3]], -5
+; GATHER-NEXT:    [[OP_RDX]] = add i32 -5, [[TMP3]]
 ; GATHER-NEXT:    br label [[FOR_BODY]]
 ;
 ; MAX-COST-LABEL: @PR32038(
@@ -119,7 +119,7 @@
 ; MAX-COST-NEXT:    [[P17:%.*]] = phi i32 [ [[OP_RDX:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
 ; MAX-COST-NEXT:    [[TMP2:%.*]] = select <8 x i1> [[TMP1]], <8 x i32> , <8 x i32>
 ; MAX-COST-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP2]])
-; MAX-COST-NEXT:    [[OP_RDX]] = add i32 [[TMP3]], -5
+; MAX-COST-NEXT:    [[OP_RDX]] = add i32 -5, [[TMP3]]
 ; MAX-COST-NEXT:    br label [[FOR_BODY]]
 ;
 entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/horizontal.ll
b/llvm/test/Transforms/SLPVectorizer/AArch64/horizontal.ll --- a/llvm/test/Transforms/SLPVectorizer/AArch64/horizontal.ll +++ b/llvm/test/Transforms/SLPVectorizer/AArch64/horizontal.ll @@ -41,7 +41,7 @@ ; CHECK-NEXT: [[TMP6:%.*]] = sub nsw <4 x i32> zeroinitializer, [[TMP4]] ; CHECK-NEXT: [[TMP7:%.*]] = select <4 x i1> [[TMP5]], <4 x i32> [[TMP6]], <4 x i32> [[TMP4]] ; CHECK-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP7]]) -; CHECK-NEXT: [[OP_RDX]] = add i32 [[TMP8]], [[S_026]] +; CHECK-NEXT: [[OP_RDX]] = add i32 [[S_026]], [[TMP8]] ; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i32, i32* [[P1_023]], i64 [[IDX_EXT]] ; CHECK-NEXT: [[ADD_PTR29]] = getelementptr inbounds i32, i32* [[P2_024]], i64 [[IDX_EXT]] ; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[J_025]], 1 @@ -158,7 +158,7 @@ ; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = mul nsw <4 x i32> [[TMP3]], [[TMP1]] ; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]]) -; CHECK-NEXT: [[OP_RDX]] = add i32 [[TMP5]], [[S_020]] +; CHECK-NEXT: [[OP_RDX]] = add i32 [[S_020]], [[TMP5]] ; CHECK-NEXT: [[CMP14:%.*]] = icmp slt i32 [[OP_RDX]], [[LIM:%.*]] ; CHECK-NEXT: br i1 [[CMP14]], label [[IF_END]], label [[FOR_END_LOOPEXIT:%.*]] ; CHECK: if.end: @@ -260,7 +260,7 @@ ; CHECK-NEXT: [[TMP8:%.*]] = sub nsw <8 x i32> zeroinitializer, [[TMP6]] ; CHECK-NEXT: [[TMP9:%.*]] = select <8 x i1> [[TMP7]], <8 x i32> [[TMP8]], <8 x i32> [[TMP6]] ; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP9]]) -; CHECK-NEXT: [[OP_RDX]] = add i32 [[TMP10]], [[S_047]] +; CHECK-NEXT: [[OP_RDX]] = add i32 [[S_047]], [[TMP10]] ; CHECK-NEXT: [[CMP83:%.*]] = icmp slt i32 [[OP_RDX]], [[LIM:%.*]] ; CHECK-NEXT: br i1 [[CMP83]], label [[IF_END_86]], label [[FOR_END_LOOPEXIT:%.*]] ; CHECK: if.end.86: diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/slp-and-reduction.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/slp-and-reduction.ll --- a/llvm/test/Transforms/SLPVectorizer/AArch64/slp-and-reduction.ll +++ b/llvm/test/Transforms/SLPVectorizer/AArch64/slp-and-reduction.ll @@ -14,8 +14,8 @@ ; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* [[TMP2]], align 1 ; CHECK-NEXT: [[TMP4:%.*]] = xor <8 x i8> [[TMP3]], [[TMP1]] ; CHECK-NEXT: [[TMP5:%.*]] = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> [[TMP4]]) -; CHECK-NEXT: [[OP_EXTRA:%.*]] = and i8 [[TMP5]], 1 -; CHECK-NEXT: ret i8 [[OP_EXTRA]] +; CHECK-NEXT: [[OP_RDX:%.*]] = and i8 1, [[TMP5]] +; CHECK-NEXT: ret i8 [[OP_RDX]] ; entry: %arrayidx = getelementptr inbounds %struct.buf, %struct.buf* %a, i64 0, i32 0, i64 0 diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/slp-xor-reduction.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/slp-xor-reduction.ll --- a/llvm/test/Transforms/SLPVectorizer/AArch64/slp-xor-reduction.ll +++ b/llvm/test/Transforms/SLPVectorizer/AArch64/slp-xor-reduction.ll @@ -14,8 +14,8 @@ ; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* [[TMP2]], align 1 ; CHECK-NEXT: [[TMP4:%.*]] = and <8 x i8> [[TMP3]], [[TMP1]] ; CHECK-NEXT: [[TMP5:%.*]] = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> [[TMP4]]) -; CHECK-NEXT: [[OP_EXTRA:%.*]] = xor i8 [[TMP5]], 1 -; CHECK-NEXT: ret i8 [[OP_EXTRA]] +; CHECK-NEXT: [[OP_RDX:%.*]] = xor i8 1, [[TMP5]] +; CHECK-NEXT: ret i8 [[OP_RDX]] ; entry: %arrayidx = getelementptr inbounds %struct.buf, %struct.buf* %a, i64 0, i32 0, i64 0 diff --git a/llvm/test/Transforms/SLPVectorizer/AMDGPU/horizontal-store.ll 
b/llvm/test/Transforms/SLPVectorizer/AMDGPU/horizontal-store.ll --- a/llvm/test/Transforms/SLPVectorizer/AMDGPU/horizontal-store.ll +++ b/llvm/test/Transforms/SLPVectorizer/AMDGPU/horizontal-store.ll @@ -22,8 +22,8 @@ ; GFX9-NEXT: [[SELECT1:%.*]] = select i1 [[CMP1]], i32 [[TMP2]], i32 [[TMP3]] ; GFX9-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 2) to <4 x i32>*), align 8 ; GFX9-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP4]]) -; GFX9-NEXT: [[OP_RDX:%.*]] = icmp sgt i32 [[TMP5]], [[SELECT1]] -; GFX9-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i32 [[TMP5]], i32 [[SELECT1]] +; GFX9-NEXT: [[OP_RDX:%.*]] = icmp sgt i32 [[SELECT1]], [[TMP5]] +; GFX9-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i32 [[SELECT1]], i32 [[TMP5]] ; GFX9-NEXT: [[STORE_SELECT:%.*]] = select i1 [[CMP1]], i32 3, i32 4 ; GFX9-NEXT: store i32 [[STORE_SELECT]], i32* @var, align 8 ; GFX9-NEXT: ret i32 [[OP_RDX1]] @@ -63,8 +63,8 @@ ; GFX9-NEXT: [[SELECT1:%.*]] = select i1 [[CMP1]], i64 [[TMP2]], i64 [[TMP3]] ; GFX9-NEXT: [[TMP4:%.*]] = load <4 x i64>, <4 x i64>* bitcast (i64* getelementptr inbounds ([32 x i64], [32 x i64]* @arr64, i64 0, i64 2) to <4 x i64>*), align 16 ; GFX9-NEXT: [[TMP5:%.*]] = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> [[TMP4]]) -; GFX9-NEXT: [[OP_RDX:%.*]] = icmp slt i64 [[TMP5]], [[SELECT1]] -; GFX9-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i64 [[TMP5]], i64 [[SELECT1]] +; GFX9-NEXT: [[OP_RDX:%.*]] = icmp slt i64 [[SELECT1]], [[TMP5]] +; GFX9-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i64 [[SELECT1]], i64 [[TMP5]] ; GFX9-NEXT: [[STORE_SELECT:%.*]] = select i1 [[CMP1]], i64 3, i64 4 ; GFX9-NEXT: store i64 [[STORE_SELECT]], i64* @var64, align 8 ; GFX9-NEXT: ret i64 [[OP_RDX1]] @@ -206,8 +206,8 @@ ; GFX9-NEXT: [[SELECT1:%.*]] = select i1 [[CMP1]], i32 [[EX0]], i32 [[V1]] ; GFX9-NEXT: [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 2) to <4 x i32>*), align 8 ; GFX9-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP2]]) -; GFX9-NEXT: [[OP_RDX:%.*]] = icmp sgt i32 [[TMP3]], [[SELECT1]] -; GFX9-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i32 [[TMP3]], i32 [[SELECT1]] +; GFX9-NEXT: [[OP_RDX:%.*]] = icmp sgt i32 [[SELECT1]], [[TMP3]] +; GFX9-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i32 [[SELECT1]], i32 [[TMP3]] ; GFX9-NEXT: [[STOREVAL:%.*]] = select i1 [[CMP1]], i32 3, i32 4 ; GFX9-NEXT: store i32 [[STOREVAL]], i32* @var, align 8 ; GFX9-NEXT: ret i32 [[OP_RDX1]] diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll --- a/llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll +++ b/llvm/test/Transforms/SLPVectorizer/RISCV/reductions.ll @@ -152,7 +152,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1 ; CHECK-NEXT: [[TMP2:%.*]] = xor <8 x i8> [[TMP1]], [[TMP0]] ; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.and.v8i8(<8 x i8> [[TMP2]]) -; CHECK-NEXT: [[OP_RDX:%.*]] = and i8 [[TMP3]], 1 +; CHECK-NEXT: [[OP_RDX:%.*]] = and i8 1, [[TMP3]] ; CHECK-NEXT: ret i8 [[OP_RDX]] ; entry: @@ -279,7 +279,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1 ; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i8> [[TMP1]], [[TMP0]] ; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.xor.v8i8(<8 x i8> [[TMP2]]) -; CHECK-NEXT: [[OP_RDX:%.*]] = xor i8 [[TMP3]], 1 +; CHECK-NEXT: 
[[OP_RDX:%.*]] = xor i8 1, [[TMP3]] ; CHECK-NEXT: ret i8 [[OP_RDX]] ; entry: @@ -345,7 +345,7 @@ ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr [[ARRAYIDX3]], align 1 ; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i8> [[TMP1]], [[TMP0]] ; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> [[TMP2]]) -; CHECK-NEXT: [[OP_RDX:%.*]] = add i8 [[TMP3]], 1 +; CHECK-NEXT: [[OP_RDX:%.*]] = add i8 1, [[TMP3]] ; CHECK-NEXT: ret i8 [[OP_RDX]] ; entry: diff --git a/llvm/test/Transforms/SLPVectorizer/X86/PR35628_1.ll b/llvm/test/Transforms/SLPVectorizer/X86/PR35628_1.ll --- a/llvm/test/Transforms/SLPVectorizer/X86/PR35628_1.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/PR35628_1.ll @@ -20,7 +20,7 @@ ; CHECK-NEXT: [[OP_RDX:%.*]] = add i32 [[TMP4]], [[TMP3]] ; CHECK-NEXT: [[OP_RDX1:%.*]] = add i32 [[TMP2]], 1 ; CHECK-NEXT: [[OP_RDX2:%.*]] = add i32 [[OP_RDX]], [[OP_RDX1]] -; CHECK-NEXT: [[OP_RDX3]] = add i32 [[TMP7]], [[OP_RDX2]] +; CHECK-NEXT: [[OP_RDX3]] = add i32 [[OP_RDX2]], [[TMP7]] ; CHECK-NEXT: br label [[LOOP]] ; CHECK: bail_out: ; CHECK-NEXT: ret void diff --git a/llvm/test/Transforms/SLPVectorizer/X86/PR35628_2.ll b/llvm/test/Transforms/SLPVectorizer/X86/PR35628_2.ll --- a/llvm/test/Transforms/SLPVectorizer/X86/PR35628_2.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/PR35628_2.ll @@ -20,7 +20,7 @@ ; CHECK-NEXT: [[TMP6:%.*]] = ashr exact <4 x i64> [[TMP5]], ; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[TMP6]]) ; CHECK-NEXT: [[OP_RDX:%.*]] = add i64 [[TMP3]], 0 -; CHECK-NEXT: [[OP_RDX1]] = add i64 [[TMP7]], [[OP_RDX]] +; CHECK-NEXT: [[OP_RDX1]] = add i64 [[OP_RDX]], [[TMP7]] ; CHECK-NEXT: br label [[LOOP]] ; entry: diff --git a/llvm/test/Transforms/SLPVectorizer/X86/PR39774.ll b/llvm/test/Transforms/SLPVectorizer/X86/PR39774.ll --- a/llvm/test/Transforms/SLPVectorizer/X86/PR39774.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/PR39774.ll @@ -23,9 +23,9 @@ ; CHECK-NEXT: [[OP_RDX2:%.*]] = and i32 [[TMP0]], [[TMP0]] ; CHECK-NEXT: [[OP_RDX3:%.*]] = and i32 [[TMP0]], [[TMP4]] ; CHECK-NEXT: [[OP_RDX4:%.*]] = and i32 [[OP_RDX2]], [[OP_RDX3]] -; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[OP_RDX1]], i32 0 +; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[OP_RDX4]], i32 0 ; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x i32> [[TMP9]], i32 [[TMP4]], i32 1 -; CHECK-NEXT: [[TMP11:%.*]] = insertelement <2 x i32> , i32 [[OP_RDX4]], i32 0 +; CHECK-NEXT: [[TMP11:%.*]] = insertelement <2 x i32> , i32 [[OP_RDX1]], i32 0 ; CHECK-NEXT: [[TMP12:%.*]] = and <2 x i32> [[TMP10]], [[TMP11]] ; CHECK-NEXT: [[TMP13:%.*]] = add <2 x i32> [[TMP10]], [[TMP11]] ; CHECK-NEXT: [[TMP14]] = shufflevector <2 x i32> [[TMP12]], <2 x i32> [[TMP13]], <2 x i32> @@ -51,7 +51,7 @@ ; FORCE_REDUCTION-NEXT: [[OP_RDX2:%.*]] = and i32 [[TMP0]], [[TMP0]] ; FORCE_REDUCTION-NEXT: [[OP_RDX3:%.*]] = and i32 [[TMP0]], [[TMP4]] ; FORCE_REDUCTION-NEXT: [[OP_RDX4:%.*]] = and i32 [[OP_RDX2]], [[OP_RDX3]] -; FORCE_REDUCTION-NEXT: [[OP_RDX5:%.*]] = and i32 [[OP_RDX1]], [[OP_RDX4]] +; FORCE_REDUCTION-NEXT: [[OP_RDX5:%.*]] = and i32 [[OP_RDX4]], [[OP_RDX1]] ; FORCE_REDUCTION-NEXT: [[VAL_43:%.*]] = add i32 [[TMP4]], 14910 ; FORCE_REDUCTION-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[OP_RDX5]], i32 0 ; FORCE_REDUCTION-NEXT: [[TMP10]] = insertelement <2 x i32> [[TMP9]], i32 [[VAL_43]], i32 1 diff --git a/llvm/test/Transforms/SLPVectorizer/X86/PR40310.ll b/llvm/test/Transforms/SLPVectorizer/X86/PR40310.ll --- a/llvm/test/Transforms/SLPVectorizer/X86/PR40310.ll 
+++ b/llvm/test/Transforms/SLPVectorizer/X86/PR40310.ll @@ -14,7 +14,7 @@ ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <16 x i32> [[SHUFFLE]], i32 0 ; CHECK-NEXT: store atomic i32 [[TMP4]], i32* [[VALS:%.*]] unordered, align 4 ; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.and.v16i32(<16 x i32> [[TMP3]]) -; CHECK-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP5]], [[TMP2]] +; CHECK-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP2]], [[TMP5]] ; CHECK-NEXT: [[V44:%.*]] = add i32 [[TMP2]], 16 ; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x i32> poison, i32 [[OP_RDX]], i32 0 ; CHECK-NEXT: [[TMP7]] = insertelement <2 x i32> [[TMP6]], i32 [[V44]], i32 1 diff --git a/llvm/test/Transforms/SLPVectorizer/X86/alternate-cmp-swapped-pred.ll b/llvm/test/Transforms/SLPVectorizer/X86/alternate-cmp-swapped-pred.ll --- a/llvm/test/Transforms/SLPVectorizer/X86/alternate-cmp-swapped-pred.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/alternate-cmp-swapped-pred.ll @@ -14,8 +14,8 @@ ; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <8 x i1> [[TMP4]], <8 x i1> [[TMP5]], <8 x i32> ; CHECK-NEXT: [[TMP7:%.*]] = zext <8 x i1> [[TMP6]] to <8 x i16> ; CHECK-NEXT: [[TMP8:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[TMP7]]) -; CHECK-NEXT: [[OP_EXTRA:%.*]] = add i16 [[TMP8]], 0 -; CHECK-NEXT: ret i16 [[OP_EXTRA]] +; CHECK-NEXT: [[OP_RDX:%.*]] = add i16 0, [[TMP8]] +; CHECK-NEXT: ret i16 [[OP_RDX]] ; entry: %call = load i16, i16* undef, align 2 diff --git a/llvm/test/Transforms/SLPVectorizer/X86/bool-mask.ll b/llvm/test/Transforms/SLPVectorizer/X86/bool-mask.ll --- a/llvm/test/Transforms/SLPVectorizer/X86/bool-mask.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/bool-mask.ll @@ -46,7 +46,7 @@ ; SSE-NEXT: [[OP_RDX1:%.*]] = or i64 [[OR_13]], [[OR_14]] ; SSE-NEXT: [[OP_RDX2:%.*]] = or i64 [[OR_15]], [[OR]] ; SSE-NEXT: [[OP_RDX3:%.*]] = or i64 [[OP_RDX1]], [[OP_RDX2]] -; SSE-NEXT: [[OP_RDX4:%.*]] = or i64 [[OP_RDX]], [[OP_RDX3]] +; SSE-NEXT: [[OP_RDX4:%.*]] = or i64 [[OP_RDX3]], [[OP_RDX]] ; SSE-NEXT: ret i64 [[OP_RDX4]] ; ; AVX-LABEL: @bitmask_16xi8( @@ -80,7 +80,7 @@ ; AVX-NEXT: [[OP_RDX1:%.*]] = or i64 [[OR_13]], [[OR_14]] ; AVX-NEXT: [[OP_RDX2:%.*]] = or i64 [[OR_15]], [[OR]] ; AVX-NEXT: [[OP_RDX3:%.*]] = or i64 [[OP_RDX1]], [[OP_RDX2]] -; AVX-NEXT: [[OP_RDX4:%.*]] = or i64 [[OP_RDX]], [[OP_RDX3]] +; AVX-NEXT: [[OP_RDX4:%.*]] = or i64 [[OP_RDX3]], [[OP_RDX]] ; AVX-NEXT: ret i64 [[OP_RDX4]] ; ; AVX512-LABEL: @bitmask_16xi8( @@ -112,7 +112,7 @@ ; AVX512-NEXT: [[OP_RDX1:%.*]] = or i64 [[TMP13]], [[TMP14]] ; AVX512-NEXT: [[OP_RDX2:%.*]] = or i64 [[OR_15]], [[OR]] ; AVX512-NEXT: [[OP_RDX3:%.*]] = or i64 [[OP_RDX1]], [[OP_RDX2]] -; AVX512-NEXT: [[OP_RDX4:%.*]] = or i64 [[OP_RDX]], [[OP_RDX3]] +; AVX512-NEXT: [[OP_RDX4:%.*]] = or i64 [[OP_RDX3]], [[OP_RDX]] ; AVX512-NEXT: ret i64 [[OP_RDX4]] ; entry: @@ -223,7 +223,7 @@ ; SSE-NEXT: [[OP_RDX:%.*]] = or i64 [[OR_5]], [[OR_6]] ; SSE-NEXT: [[OP_RDX1:%.*]] = or i64 [[OR_7]], [[OR]] ; SSE-NEXT: [[OP_RDX2:%.*]] = or i64 [[OP_RDX]], [[OP_RDX1]] -; SSE-NEXT: [[OP_RDX3:%.*]] = or i64 [[TMP7]], [[OP_RDX2]] +; SSE-NEXT: [[OP_RDX3:%.*]] = or i64 [[OP_RDX2]], [[TMP7]] ; SSE-NEXT: ret i64 [[OP_RDX3]] ; ; AVX-LABEL: @bitmask_4xi16( @@ -251,7 +251,7 @@ ; AVX-NEXT: [[OP_RDX:%.*]] = or i64 [[OR_5]], [[OR_6]] ; AVX-NEXT: [[OP_RDX1:%.*]] = or i64 [[OR_7]], [[OR]] ; AVX-NEXT: [[OP_RDX2:%.*]] = or i64 [[OP_RDX]], [[OP_RDX1]] -; AVX-NEXT: [[OP_RDX3:%.*]] = or i64 [[TMP7]], [[OP_RDX2]] +; AVX-NEXT: [[OP_RDX3:%.*]] = or i64 [[OP_RDX2]], [[TMP7]] ; AVX-NEXT: ret i64 [[OP_RDX3]] ; ; AVX512-LABEL: 
@bitmask_4xi16( @@ -277,7 +277,7 @@ ; AVX512-NEXT: [[OP_RDX:%.*]] = or i64 [[TMP9]], [[TMP10]] ; AVX512-NEXT: [[OP_RDX1:%.*]] = or i64 [[OR_7]], [[OR]] ; AVX512-NEXT: [[OP_RDX2:%.*]] = or i64 [[OP_RDX]], [[OP_RDX1]] -; AVX512-NEXT: [[OP_RDX3:%.*]] = or i64 [[TMP8]], [[OP_RDX2]] +; AVX512-NEXT: [[OP_RDX3:%.*]] = or i64 [[OP_RDX2]], [[TMP8]] ; AVX512-NEXT: ret i64 [[OP_RDX3]] ; entry: @@ -348,7 +348,7 @@ ; SSE-NEXT: [[OP_RDX:%.*]] = or i64 [[OR_5]], [[OR_6]] ; SSE-NEXT: [[OP_RDX1:%.*]] = or i64 [[OR_7]], [[OR]] ; SSE-NEXT: [[OP_RDX2:%.*]] = or i64 [[OP_RDX]], [[OP_RDX1]] -; SSE-NEXT: [[OP_RDX3:%.*]] = or i64 [[TMP7]], [[OP_RDX2]] +; SSE-NEXT: [[OP_RDX3:%.*]] = or i64 [[OP_RDX2]], [[TMP7]] ; SSE-NEXT: ret i64 [[OP_RDX3]] ; ; AVX-LABEL: @bitmask_8xi32( @@ -376,7 +376,7 @@ ; AVX-NEXT: [[OP_RDX:%.*]] = or i64 [[OR_5]], [[OR_6]] ; AVX-NEXT: [[OP_RDX1:%.*]] = or i64 [[OR_7]], [[OR]] ; AVX-NEXT: [[OP_RDX2:%.*]] = or i64 [[OP_RDX]], [[OP_RDX1]] -; AVX-NEXT: [[OP_RDX3:%.*]] = or i64 [[TMP7]], [[OP_RDX2]] +; AVX-NEXT: [[OP_RDX3:%.*]] = or i64 [[OP_RDX2]], [[TMP7]] ; AVX-NEXT: ret i64 [[OP_RDX3]] ; ; AVX512-LABEL: @bitmask_8xi32( @@ -402,7 +402,7 @@ ; AVX512-NEXT: [[OP_RDX:%.*]] = or i64 [[TMP9]], [[TMP10]] ; AVX512-NEXT: [[OP_RDX1:%.*]] = or i64 [[OR_7]], [[OR]] ; AVX512-NEXT: [[OP_RDX2:%.*]] = or i64 [[OP_RDX]], [[OP_RDX1]] -; AVX512-NEXT: [[OP_RDX3:%.*]] = or i64 [[TMP8]], [[OP_RDX2]] +; AVX512-NEXT: [[OP_RDX3:%.*]] = or i64 [[OP_RDX2]], [[TMP8]] ; AVX512-NEXT: ret i64 [[OP_RDX3]] ; entry: @@ -515,7 +515,7 @@ ; SSE4-NEXT: [[OP_RDX:%.*]] = or i64 [[OR_5]], [[OR_6]] ; SSE4-NEXT: [[OP_RDX1:%.*]] = or i64 [[OR_7]], [[OR]] ; SSE4-NEXT: [[OP_RDX2:%.*]] = or i64 [[OP_RDX]], [[OP_RDX1]] -; SSE4-NEXT: [[OP_RDX3:%.*]] = or i64 [[TMP7]], [[OP_RDX2]] +; SSE4-NEXT: [[OP_RDX3:%.*]] = or i64 [[OP_RDX2]], [[TMP7]] ; SSE4-NEXT: ret i64 [[OP_RDX3]] ; ; AVX-LABEL: @bitmask_8xi64( @@ -543,7 +543,7 @@ ; AVX-NEXT: [[OP_RDX:%.*]] = or i64 [[OR_5]], [[OR_6]] ; AVX-NEXT: [[OP_RDX1:%.*]] = or i64 [[OR_7]], [[OR]] ; AVX-NEXT: [[OP_RDX2:%.*]] = or i64 [[OP_RDX]], [[OP_RDX1]] -; AVX-NEXT: [[OP_RDX3:%.*]] = or i64 [[TMP7]], [[OP_RDX2]] +; AVX-NEXT: [[OP_RDX3:%.*]] = or i64 [[OP_RDX2]], [[TMP7]] ; AVX-NEXT: ret i64 [[OP_RDX3]] ; ; AVX512-LABEL: @bitmask_8xi64( @@ -569,7 +569,7 @@ ; AVX512-NEXT: [[OP_RDX:%.*]] = or i64 [[TMP9]], [[TMP10]] ; AVX512-NEXT: [[OP_RDX1:%.*]] = or i64 [[OR_7]], [[OR]] ; AVX512-NEXT: [[OP_RDX2:%.*]] = or i64 [[OP_RDX]], [[OP_RDX1]] -; AVX512-NEXT: [[OP_RDX3:%.*]] = or i64 [[TMP8]], [[OP_RDX2]] +; AVX512-NEXT: [[OP_RDX3:%.*]] = or i64 [[OP_RDX2]], [[TMP8]] ; AVX512-NEXT: ret i64 [[OP_RDX3]] ; entry: diff --git a/llvm/test/Transforms/SLPVectorizer/X86/compare-reduce.ll b/llvm/test/Transforms/SLPVectorizer/X86/compare-reduce.ll --- a/llvm/test/Transforms/SLPVectorizer/X86/compare-reduce.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/compare-reduce.ll @@ -85,8 +85,8 @@ ; CHECK-NEXT: [[CMP3WRONG:%.*]] = fcmp olt float [[TMP1]], 4.200000e+01 ; CHECK-NEXT: [[TMP2:%.*]] = fcmp ogt <4 x float> [[X]], ; CHECK-NEXT: [[TMP3:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP2]]) -; CHECK-NEXT: [[TMP4:%.*]] = or i1 [[TMP3]], [[CMP3WRONG]] -; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP4]], float -1.000000e+00, float 1.000000e+00 +; CHECK-NEXT: [[OP_RDX:%.*]] = or i1 [[CMP3WRONG]], [[TMP3]] +; CHECK-NEXT: [[R:%.*]] = select i1 [[OP_RDX]], float -1.000000e+00, float 1.000000e+00 ; CHECK-NEXT: ret float [[R]] ; %x0 = extractelement <4 x float> %x, i32 0 @@ -112,8 +112,8 @@ ; CHECK-NEXT: [[CMP3WRONG:%.*]] = fcmp olt 
float [[TMP1]], 4.200000e+01 ; CHECK-NEXT: [[TMP2:%.*]] = fcmp ogt <4 x float> [[X]], ; CHECK-NEXT: [[TMP3:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP2]]) -; CHECK-NEXT: [[TMP4:%.*]] = or i1 [[TMP3]], [[CMP3WRONG]] -; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP4]], float -1.000000e+00, float 1.000000e+00 +; CHECK-NEXT: [[OP_RDX:%.*]] = or i1 [[CMP3WRONG]], [[TMP3]] +; CHECK-NEXT: [[R:%.*]] = select i1 [[OP_RDX]], float -1.000000e+00, float 1.000000e+00 ; CHECK-NEXT: ret float [[R]] ; %x0 = extractelement <4 x float> %x, i32 0 @@ -139,8 +139,8 @@ ; CHECK-NEXT: [[CMP3WRONG:%.*]] = icmp slt i32 [[TMP1]], 42 ; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <4 x i32> [[X]], ; CHECK-NEXT: [[TMP3:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP2]]) -; CHECK-NEXT: [[TMP4:%.*]] = or i1 [[TMP3]], [[CMP3WRONG]] -; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP4]], i32 -1, i32 1 +; CHECK-NEXT: [[OP_RDX:%.*]] = or i1 [[CMP3WRONG]], [[TMP3]] +; CHECK-NEXT: [[R:%.*]] = select i1 [[OP_RDX]], i32 -1, i32 1 ; CHECK-NEXT: ret i32 [[R]] ; %x0 = extractelement <4 x i32> %x, i32 0 @@ -170,8 +170,8 @@ ; CHECK-NEXT: [[CMP3WRONG:%.*]] = icmp slt i32 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt <4 x i32> [[X]], [[Y]] ; CHECK-NEXT: [[TMP4:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP3]]) -; CHECK-NEXT: [[TMP5:%.*]] = or i1 [[TMP4]], [[CMP3WRONG]] -; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP5]], i32 -1, i32 1 +; CHECK-NEXT: [[OP_RDX:%.*]] = or i1 [[CMP3WRONG]], [[TMP4]] +; CHECK-NEXT: [[R:%.*]] = select i1 [[OP_RDX]], i32 -1, i32 1 ; CHECK-NEXT: ret i32 [[R]] ; %x0 = extractelement <4 x i32> %x, i32 0 diff --git a/llvm/test/Transforms/SLPVectorizer/X86/crash_reordering_undefs.ll b/llvm/test/Transforms/SLPVectorizer/X86/crash_reordering_undefs.ll --- a/llvm/test/Transforms/SLPVectorizer/X86/crash_reordering_undefs.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/crash_reordering_undefs.ll @@ -17,9 +17,9 @@ ; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> undef) ; CHECK-NEXT: [[OP_RDX:%.*]] = add i32 undef, [[ADD0]] ; CHECK-NEXT: [[OP_RDX1:%.*]] = add i32 [[ADD2]], [[ADD4]] -; CHECK-NEXT: [[OP_RDX2:%.*]] = add i32 [[OP_RDX]], [[OP_RDX1]] -; CHECK-NEXT: [[OP_RDX3:%.*]] = add i32 [[OP_RDX2]], [[ADD9]] -; CHECK-NEXT: [[OP_RDX4:%.*]] = add i32 [[TMP0]], [[OP_RDX3]] +; CHECK-NEXT: [[OP_RDX2:%.*]] = add i32 [[ADD9]], [[TMP0]] +; CHECK-NEXT: [[OP_RDX3:%.*]] = add i32 [[OP_RDX]], [[OP_RDX1]] +; CHECK-NEXT: [[OP_RDX4:%.*]] = add i32 [[OP_RDX3]], [[OP_RDX2]] ; CHECK-NEXT: ret i32 [[OP_RDX4]] ; entry: diff --git a/llvm/test/Transforms/SLPVectorizer/X86/gather-extractelements-different-bbs.ll b/llvm/test/Transforms/SLPVectorizer/X86/gather-extractelements-different-bbs.ll --- a/llvm/test/Transforms/SLPVectorizer/X86/gather-extractelements-different-bbs.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/gather-extractelements-different-bbs.ll @@ -15,7 +15,7 @@ ; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x i32> [[TMP4]], i32 [[TMP3]], i32 1 ; CHECK-NEXT: [[SHUFFLE13:%.*]] = shufflevector <2 x i32> [[TMP5]], <2 x i32> poison, <4 x i32> ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[SHUFFLE13]]) -; CHECK-NEXT: [[OP_RDX14:%.*]] = add i32 [[TMP6]], 0 +; CHECK-NEXT: [[OP_RDX14:%.*]] = add i32 0, [[TMP6]] ; CHECK-NEXT: br label [[BB3:%.*]] ; CHECK: bb2: ; CHECK-NEXT: br label [[BB3]] @@ -28,9 +28,13 @@ ; CHECK-NEXT: [[TMP8:%.*]] = add <4 x i32> [[SHUFFLE]], [[SHUFFLE9]] ; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> 
zeroinitializer) ; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP8]]) -; CHECK-NEXT: [[OP_RDX10:%.*]] = add i32 [[TMP9]], 0 -; CHECK-NEXT: [[OP_RDX11:%.*]] = add i32 [[OP_RDX10]], [[TMP2]] -; CHECK-NEXT: [[OP_RDX12:%.*]] = add i32 [[TMP10]], [[OP_RDX11]] +; CHECK-NEXT: [[TMP11:%.*]] = insertelement <2 x i32> , i32 [[TMP2]], i32 1 +; CHECK-NEXT: [[TMP12:%.*]] = insertelement <2 x i32> poison, i32 [[TMP9]], i32 0 +; CHECK-NEXT: [[TMP13:%.*]] = insertelement <2 x i32> [[TMP12]], i32 [[TMP10]], i32 1 +; CHECK-NEXT: [[TMP14:%.*]] = add <2 x i32> [[TMP11]], [[TMP13]] +; CHECK-NEXT: [[TMP15:%.*]] = extractelement <2 x i32> [[TMP14]], i32 0 +; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x i32> [[TMP14]], i32 1 +; CHECK-NEXT: [[OP_RDX12:%.*]] = add i32 [[TMP15]], [[TMP16]] ; CHECK-NEXT: ret i32 [[OP_RDX12]] ; CHECK: bb5: ; CHECK-NEXT: br label [[BB4:%.*]] diff --git a/llvm/test/Transforms/SLPVectorizer/X86/horizontal-list.ll b/llvm/test/Transforms/SLPVectorizer/X86/horizontal-list.ll --- a/llvm/test/Transforms/SLPVectorizer/X86/horizontal-list.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/horizontal-list.ll @@ -19,7 +19,7 @@ ; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> poison, <8 x i32> ; CHECK-NEXT: [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v8f32(float -0.000000e+00, <8 x float> [[SHUFFLE]]) ; CHECK-NEXT: [[OP_RDX:%.*]] = fadd fast float [[CONV]], [[CONV]] -; CHECK-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[TMP4]], [[OP_RDX]] +; CHECK-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[OP_RDX]], [[TMP4]] ; CHECK-NEXT: store float [[OP_RDX1]], float* @res, align 4 ; CHECK-NEXT: ret float [[OP_RDX1]] ; @@ -34,7 +34,7 @@ ; THRESHOLD-NEXT: [[SHUFFLE:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> poison, <8 x i32> ; THRESHOLD-NEXT: [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v8f32(float -0.000000e+00, <8 x float> [[SHUFFLE]]) ; THRESHOLD-NEXT: [[OP_RDX:%.*]] = fadd fast float [[CONV]], [[CONV]] -; THRESHOLD-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[TMP4]], [[OP_RDX]] +; THRESHOLD-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[OP_RDX]], [[TMP4]] ; THRESHOLD-NEXT: store float [[OP_RDX1]], float* @res, align 4 ; THRESHOLD-NEXT: ret float [[OP_RDX1]] ; @@ -80,7 +80,7 @@ ; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <8 x float> [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v8f32(float -0.000000e+00, <8 x float> [[TMP3]]) ; CHECK-NEXT: [[OP_RDX:%.*]] = fadd fast float [[CONV]], [[CONV6]] -; CHECK-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[TMP4]], [[OP_RDX]] +; CHECK-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[OP_RDX]], [[TMP4]] ; CHECK-NEXT: store float [[OP_RDX1]], float* @res, align 4 ; CHECK-NEXT: ret float [[OP_RDX1]] ; @@ -100,7 +100,7 @@ ; THRESHOLD-NEXT: [[TMP11:%.*]] = extractelement <2 x float> [[TMP6]], i32 0 ; THRESHOLD-NEXT: [[TMP12:%.*]] = extractelement <2 x float> [[TMP6]], i32 1 ; THRESHOLD-NEXT: [[OP_RDX:%.*]] = fadd fast float [[TMP11]], [[TMP12]] -; THRESHOLD-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[TMP10]], [[OP_RDX]] +; THRESHOLD-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[OP_RDX]], [[TMP10]] ; THRESHOLD-NEXT: store float [[OP_RDX1]], float* @res, align 4 ; THRESHOLD-NEXT: ret float [[OP_RDX1]] ; @@ -495,7 +495,7 @@ ; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[X:%.*]] to <32 x float>* ; CHECK-NEXT: [[TMP1:%.*]] = load <32 x float>, <32 x float>* [[TMP0]], align 4 ; CHECK-NEXT: [[TMP2:%.*]] = call fast float @llvm.vector.reduce.fadd.v32f32(float -0.000000e+00, <32 
x float> [[TMP1]]) -; CHECK-NEXT: [[OP_RDX:%.*]] = fadd fast float [[TMP2]], [[CONV]] +; CHECK-NEXT: [[OP_RDX:%.*]] = fadd fast float [[CONV]], [[TMP2]] ; CHECK-NEXT: ret float [[OP_RDX]] ; ; THRESHOLD-LABEL: @f1( @@ -505,7 +505,7 @@ ; THRESHOLD-NEXT: [[TMP0:%.*]] = bitcast float* [[X:%.*]] to <32 x float>* ; THRESHOLD-NEXT: [[TMP1:%.*]] = load <32 x float>, <32 x float>* [[TMP0]], align 4 ; THRESHOLD-NEXT: [[TMP2:%.*]] = call fast float @llvm.vector.reduce.fadd.v32f32(float -0.000000e+00, <32 x float> [[TMP1]]) -; THRESHOLD-NEXT: [[OP_RDX:%.*]] = fadd fast float [[TMP2]], [[CONV]] +; THRESHOLD-NEXT: [[OP_RDX:%.*]] = fadd fast float [[CONV]], [[TMP2]] ; THRESHOLD-NEXT: ret float [[OP_RDX]] ; entry: @@ -631,7 +631,7 @@ ; CHECK-NEXT: [[TMP10:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP5]]) ; CHECK-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[OP_RDX]], [[TMP10]] ; CHECK-NEXT: [[OP_RDX2:%.*]] = fadd fast float [[TMP6]], [[TMP7]] -; CHECK-NEXT: [[OP_RDX3:%.*]] = fadd fast float [[OP_RDX1]], [[OP_RDX2]] +; CHECK-NEXT: [[OP_RDX3:%.*]] = fadd fast float [[OP_RDX2]], [[OP_RDX1]] ; CHECK-NEXT: ret float [[OP_RDX3]] ; ; THRESHOLD-LABEL: @loadadd31( @@ -653,10 +653,10 @@ ; THRESHOLD-NEXT: [[TMP9:%.*]] = call fast float @llvm.vector.reduce.fadd.v8f32(float -0.000000e+00, <8 x float> [[TMP3]]) ; THRESHOLD-NEXT: [[OP_RDX:%.*]] = fadd fast float [[TMP8]], [[TMP9]] ; THRESHOLD-NEXT: [[TMP10:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP5]]) -; THRESHOLD-NEXT: [[TMP11:%.*]] = insertelement <2 x float> poison, float [[OP_RDX]], i32 0 -; THRESHOLD-NEXT: [[TMP12:%.*]] = insertelement <2 x float> [[TMP11]], float [[TMP6]], i32 1 -; THRESHOLD-NEXT: [[TMP13:%.*]] = insertelement <2 x float> poison, float [[TMP10]], i32 0 -; THRESHOLD-NEXT: [[TMP14:%.*]] = insertelement <2 x float> [[TMP13]], float [[TMP7]], i32 1 +; THRESHOLD-NEXT: [[TMP11:%.*]] = insertelement <2 x float> poison, float [[TMP6]], i32 0 +; THRESHOLD-NEXT: [[TMP12:%.*]] = insertelement <2 x float> [[TMP11]], float [[OP_RDX]], i32 1 +; THRESHOLD-NEXT: [[TMP13:%.*]] = insertelement <2 x float> poison, float [[TMP7]], i32 0 +; THRESHOLD-NEXT: [[TMP14:%.*]] = insertelement <2 x float> [[TMP13]], float [[TMP10]], i32 1 ; THRESHOLD-NEXT: [[TMP15:%.*]] = fadd fast <2 x float> [[TMP12]], [[TMP14]] ; THRESHOLD-NEXT: [[TMP16:%.*]] = extractelement <2 x float> [[TMP15]], i32 0 ; THRESHOLD-NEXT: [[TMP17:%.*]] = extractelement <2 x float> [[TMP15]], i32 1 @@ -765,8 +765,8 @@ ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* [[TMP0]], align 4 ; CHECK-NEXT: [[TMP2:%.*]] = call fast float @llvm.vector.reduce.fadd.v8f32(float -0.000000e+00, <8 x float> [[TMP1]]) ; CHECK-NEXT: [[OP_RDX:%.*]] = fadd fast float [[CONV]], [[CONV]] -; CHECK-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[OP_RDX]], 3.000000e+00 -; CHECK-NEXT: [[OP_RDX2:%.*]] = fadd fast float [[TMP2]], [[OP_RDX1]] +; CHECK-NEXT: [[OP_RDX1:%.*]] = fadd fast float 3.000000e+00, [[TMP2]] +; CHECK-NEXT: [[OP_RDX2:%.*]] = fadd fast float [[OP_RDX]], [[OP_RDX1]] ; CHECK-NEXT: ret float [[OP_RDX2]] ; ; THRESHOLD-LABEL: @extra_args( @@ -776,9 +776,13 @@ ; THRESHOLD-NEXT: [[TMP0:%.*]] = bitcast float* [[X:%.*]] to <8 x float>* ; THRESHOLD-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* [[TMP0]], align 4 ; THRESHOLD-NEXT: [[TMP2:%.*]] = call fast float @llvm.vector.reduce.fadd.v8f32(float -0.000000e+00, <8 x float> [[TMP1]]) -; THRESHOLD-NEXT: [[OP_RDX:%.*]] = fadd fast float [[CONV]], [[CONV]] -; THRESHOLD-NEXT: 
[[OP_RDX1:%.*]] = fadd fast float [[OP_RDX]], 3.000000e+00 -; THRESHOLD-NEXT: [[OP_RDX2:%.*]] = fadd fast float [[TMP2]], [[OP_RDX1]] +; THRESHOLD-NEXT: [[TMP3:%.*]] = insertelement <2 x float> , float [[CONV]], i32 0 +; THRESHOLD-NEXT: [[TMP4:%.*]] = insertelement <2 x float> poison, float [[CONV]], i32 0 +; THRESHOLD-NEXT: [[TMP5:%.*]] = insertelement <2 x float> [[TMP4]], float [[TMP2]], i32 1 +; THRESHOLD-NEXT: [[TMP6:%.*]] = fadd fast <2 x float> [[TMP3]], [[TMP5]] +; THRESHOLD-NEXT: [[TMP7:%.*]] = extractelement <2 x float> [[TMP6]], i32 0 +; THRESHOLD-NEXT: [[TMP8:%.*]] = extractelement <2 x float> [[TMP6]], i32 1 +; THRESHOLD-NEXT: [[OP_RDX2:%.*]] = fadd fast float [[TMP7]], [[TMP8]] ; THRESHOLD-NEXT: ret float [[OP_RDX2]] ; entry: @@ -821,9 +825,9 @@ ; CHECK-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* [[TMP0]], align 4 ; CHECK-NEXT: [[TMP2:%.*]] = call fast float @llvm.vector.reduce.fadd.v8f32(float -0.000000e+00, <8 x float> [[TMP1]]) ; CHECK-NEXT: [[OP_RDX:%.*]] = fadd fast float 3.000000e+00, [[CONV]] -; CHECK-NEXT: [[OP_RDX1:%.*]] = fadd fast float 1.000000e+01, [[OP_RDX]] -; CHECK-NEXT: [[OP_RDX2:%.*]] = fadd fast float [[OP_RDX1]], [[CONV]] -; CHECK-NEXT: [[OP_RDX3:%.*]] = fadd fast float [[TMP2]], [[OP_RDX2]] +; CHECK-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[CONV]], [[TMP2]] +; CHECK-NEXT: [[OP_RDX2:%.*]] = fadd fast float 1.000000e+01, [[OP_RDX]] +; CHECK-NEXT: [[OP_RDX3:%.*]] = fadd fast float [[OP_RDX2]], [[OP_RDX1]] ; CHECK-NEXT: ret float [[OP_RDX3]] ; ; THRESHOLD-LABEL: @extra_args_same_several_times( @@ -833,10 +837,14 @@ ; THRESHOLD-NEXT: [[TMP0:%.*]] = bitcast float* [[X:%.*]] to <8 x float>* ; THRESHOLD-NEXT: [[TMP1:%.*]] = load <8 x float>, <8 x float>* [[TMP0]], align 4 ; THRESHOLD-NEXT: [[TMP2:%.*]] = call fast float @llvm.vector.reduce.fadd.v8f32(float -0.000000e+00, <8 x float> [[TMP1]]) -; THRESHOLD-NEXT: [[OP_RDX:%.*]] = fadd fast float 3.000000e+00, [[CONV]] -; THRESHOLD-NEXT: [[OP_RDX1:%.*]] = fadd fast float 1.000000e+01, [[OP_RDX]] -; THRESHOLD-NEXT: [[OP_RDX2:%.*]] = fadd fast float [[OP_RDX1]], [[CONV]] -; THRESHOLD-NEXT: [[OP_RDX3:%.*]] = fadd fast float [[TMP2]], [[OP_RDX2]] +; THRESHOLD-NEXT: [[TMP3:%.*]] = insertelement <2 x float> poison, float [[CONV]], i32 0 +; THRESHOLD-NEXT: [[TMP4:%.*]] = insertelement <2 x float> [[TMP3]], float [[CONV]], i32 1 +; THRESHOLD-NEXT: [[TMP5:%.*]] = insertelement <2 x float> , float [[TMP2]], i32 1 +; THRESHOLD-NEXT: [[TMP6:%.*]] = fadd fast <2 x float> [[TMP4]], [[TMP5]] +; THRESHOLD-NEXT: [[TMP7:%.*]] = extractelement <2 x float> [[TMP6]], i32 0 +; THRESHOLD-NEXT: [[OP_RDX2:%.*]] = fadd fast float 1.000000e+01, [[TMP7]] +; THRESHOLD-NEXT: [[TMP8:%.*]] = extractelement <2 x float> [[TMP6]], i32 1 +; THRESHOLD-NEXT: [[OP_RDX3:%.*]] = fadd fast float [[OP_RDX2]], [[TMP8]] ; THRESHOLD-NEXT: ret float [[OP_RDX3]] ; entry: @@ -884,7 +892,7 @@ ; CHECK-NEXT: [[OP_RDX:%.*]] = fadd fast float [[CONV]], [[CONV]] ; CHECK-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[CONVC]], 3.000000e+00 ; CHECK-NEXT: [[OP_RDX2:%.*]] = fadd fast float [[OP_RDX]], [[OP_RDX1]] -; CHECK-NEXT: [[OP_RDX3:%.*]] = fadd fast float [[TMP2]], [[OP_RDX2]] +; CHECK-NEXT: [[OP_RDX3:%.*]] = fadd fast float [[OP_RDX2]], [[TMP2]] ; CHECK-NEXT: ret float [[OP_RDX3]] ; ; THRESHOLD-LABEL: @extra_args_no_replace( @@ -901,7 +909,7 @@ ; THRESHOLD-NEXT: [[TMP8:%.*]] = extractelement <2 x float> [[TMP7]], i32 0 ; THRESHOLD-NEXT: [[TMP9:%.*]] = extractelement <2 x float> [[TMP7]], i32 1 ; THRESHOLD-NEXT: [[OP_RDX2:%.*]] = fadd fast float [[TMP8]], [[TMP9]] 
-; THRESHOLD-NEXT: [[OP_RDX3:%.*]] = fadd fast float [[TMP2]], [[OP_RDX2]] +; THRESHOLD-NEXT: [[OP_RDX3:%.*]] = fadd fast float [[OP_RDX2]], [[TMP2]] ; THRESHOLD-NEXT: ret float [[OP_RDX3]] ; entry: @@ -1002,7 +1010,7 @@ ; CHECK-NEXT: [[TMP5:%.*]] = sext <4 x i1> [[TMP4]] to <4 x i32> ; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP5]]) ; CHECK-NEXT: [[OP_RDX:%.*]] = add i32 [[TMP3]], [[ARG]] -; CHECK-NEXT: [[OP_RDX2:%.*]] = add i32 [[TMP6]], [[OP_RDX]] +; CHECK-NEXT: [[OP_RDX2:%.*]] = add i32 [[OP_RDX]], [[TMP6]] ; CHECK-NEXT: ret i32 [[OP_RDX2]] ; ; THRESHOLD-LABEL: @wobble( @@ -1017,7 +1025,7 @@ ; THRESHOLD-NEXT: [[TMP5:%.*]] = sext <4 x i1> [[TMP4]] to <4 x i32> ; THRESHOLD-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP5]]) ; THRESHOLD-NEXT: [[OP_RDX:%.*]] = add i32 [[TMP3]], [[ARG]] -; THRESHOLD-NEXT: [[OP_RDX2:%.*]] = add i32 [[TMP6]], [[OP_RDX]] +; THRESHOLD-NEXT: [[OP_RDX2:%.*]] = add i32 [[OP_RDX]], [[TMP6]] ; THRESHOLD-NEXT: ret i32 [[OP_RDX2]] ; bb: diff --git a/llvm/test/Transforms/SLPVectorizer/X86/horizontal-minmax.ll b/llvm/test/Transforms/SLPVectorizer/X86/horizontal-minmax.ll --- a/llvm/test/Transforms/SLPVectorizer/X86/horizontal-minmax.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/horizontal-minmax.ll @@ -875,10 +875,10 @@ ; AVX-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP6]]) ; AVX-NEXT: [[OP_RDX:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]] ; AVX-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i32 [[TMP7]], i32 [[TMP8]] -; AVX-NEXT: [[OP_RDX2:%.*]] = icmp sgt i32 [[OP_RDX1]], [[TMP5]] -; AVX-NEXT: [[OP_RDX3:%.*]] = select i1 [[OP_RDX2]], i32 [[OP_RDX1]], i32 [[TMP5]] -; AVX-NEXT: [[OP_RDX4:%.*]] = icmp sgt i32 [[TMP9]], [[OP_RDX3]] -; AVX-NEXT: [[OP_RDX5:%.*]] = select i1 [[OP_RDX4]], i32 [[TMP9]], i32 [[OP_RDX3]] +; AVX-NEXT: [[OP_RDX2:%.*]] = icmp sgt i32 [[TMP5]], [[TMP9]] +; AVX-NEXT: [[OP_RDX3:%.*]] = select i1 [[OP_RDX2]], i32 [[TMP5]], i32 [[TMP9]] +; AVX-NEXT: [[OP_RDX4:%.*]] = icmp sgt i32 [[OP_RDX1]], [[OP_RDX3]] +; AVX-NEXT: [[OP_RDX5:%.*]] = select i1 [[OP_RDX4]], i32 [[OP_RDX1]], i32 [[OP_RDX3]] ; AVX-NEXT: [[TMP10:%.*]] = select i1 [[TMP4]], i32 3, i32 4 ; AVX-NEXT: store i32 [[TMP10]], i32* @var, align 8 ; AVX-NEXT: ret i32 [[OP_RDX5]] @@ -894,10 +894,10 @@ ; AVX2-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP6]]) ; AVX2-NEXT: [[OP_RDX:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]] ; AVX2-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i32 [[TMP7]], i32 [[TMP8]] -; AVX2-NEXT: [[OP_RDX2:%.*]] = icmp sgt i32 [[OP_RDX1]], [[TMP5]] -; AVX2-NEXT: [[OP_RDX3:%.*]] = select i1 [[OP_RDX2]], i32 [[OP_RDX1]], i32 [[TMP5]] -; AVX2-NEXT: [[OP_RDX4:%.*]] = icmp sgt i32 [[TMP9]], [[OP_RDX3]] -; AVX2-NEXT: [[OP_RDX5:%.*]] = select i1 [[OP_RDX4]], i32 [[TMP9]], i32 [[OP_RDX3]] +; AVX2-NEXT: [[OP_RDX2:%.*]] = icmp sgt i32 [[TMP5]], [[TMP9]] +; AVX2-NEXT: [[OP_RDX3:%.*]] = select i1 [[OP_RDX2]], i32 [[TMP5]], i32 [[TMP9]] +; AVX2-NEXT: [[OP_RDX4:%.*]] = icmp sgt i32 [[OP_RDX1]], [[OP_RDX3]] +; AVX2-NEXT: [[OP_RDX5:%.*]] = select i1 [[OP_RDX4]], i32 [[OP_RDX1]], i32 [[OP_RDX3]] ; AVX2-NEXT: [[TMP10:%.*]] = select i1 [[TMP4]], i32 3, i32 4 ; AVX2-NEXT: store i32 [[TMP10]], i32* @var, align 8 ; AVX2-NEXT: ret i32 [[OP_RDX5]] @@ -906,25 +906,24 @@ ; THRESH-NEXT: [[TMP2:%.*]] = load <2 x i32>, <2 x i32>* bitcast ([32 x i32]* @arr to <2 x i32>*), align 16 ; THRESH-NEXT: [[TMP3:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0 ; THRESH-NEXT: [[TMP4:%.*]] = extractelement 
<2 x i32> [[TMP2]], i32 1 -; THRESH-NEXT: [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 2) to <4 x i32>*), align 8 -; THRESH-NEXT: [[TMP6:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 6), align 8 -; THRESH-NEXT: [[TMP7:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 7), align 4 -; THRESH-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP5]]) -; THRESH-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[TMP6]], i32 0 -; THRESH-NEXT: [[TMP10:%.*]] = insertelement <2 x i32> [[TMP9]], i32 [[TMP3]], i32 1 -; THRESH-NEXT: [[TMP11:%.*]] = insertelement <2 x i32> poison, i32 [[TMP7]], i32 0 -; THRESH-NEXT: [[TMP12:%.*]] = insertelement <2 x i32> [[TMP11]], i32 [[TMP4]], i32 1 -; THRESH-NEXT: [[TMP13:%.*]] = icmp sgt <2 x i32> [[TMP10]], [[TMP12]] -; THRESH-NEXT: [[TMP14:%.*]] = select <2 x i1> [[TMP13]], <2 x i32> [[TMP10]], <2 x i32> [[TMP12]] -; THRESH-NEXT: [[TMP15:%.*]] = extractelement <2 x i32> [[TMP14]], i32 0 -; THRESH-NEXT: [[TMP16:%.*]] = extractelement <2 x i32> [[TMP14]], i32 1 -; THRESH-NEXT: [[OP_RDX2:%.*]] = icmp sgt i32 [[TMP15]], [[TMP16]] -; THRESH-NEXT: [[OP_RDX3:%.*]] = select i1 [[OP_RDX2]], i32 [[TMP15]], i32 [[TMP16]] -; THRESH-NEXT: [[OP_RDX4:%.*]] = icmp sgt i32 [[TMP8]], [[OP_RDX3]] -; THRESH-NEXT: [[OP_RDX5:%.*]] = select i1 [[OP_RDX4]], i32 [[TMP8]], i32 [[OP_RDX3]] -; THRESH-NEXT: [[TMP17:%.*]] = extractelement <2 x i1> [[TMP13]], i32 1 -; THRESH-NEXT: [[TMP18:%.*]] = select i1 [[TMP17]], i32 3, i32 4 -; THRESH-NEXT: store i32 [[TMP18]], i32* @var, align 8 +; THRESH-NEXT: [[TMP5:%.*]] = icmp sgt i32 [[TMP3]], [[TMP4]] +; THRESH-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP3]], i32 [[TMP4]] +; THRESH-NEXT: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 2) to <4 x i32>*), align 8 +; THRESH-NEXT: [[TMP8:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 6), align 8 +; THRESH-NEXT: [[TMP9:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 7), align 4 +; THRESH-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP7]]) +; THRESH-NEXT: [[TMP11:%.*]] = insertelement <2 x i32> poison, i32 [[TMP8]], i32 0 +; THRESH-NEXT: [[TMP12:%.*]] = insertelement <2 x i32> [[TMP11]], i32 [[TMP6]], i32 1 +; THRESH-NEXT: [[TMP13:%.*]] = insertelement <2 x i32> poison, i32 [[TMP9]], i32 0 +; THRESH-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> [[TMP13]], i32 [[TMP10]], i32 1 +; THRESH-NEXT: [[TMP15:%.*]] = icmp sgt <2 x i32> [[TMP12]], [[TMP14]] +; THRESH-NEXT: [[TMP16:%.*]] = select <2 x i1> [[TMP15]], <2 x i32> [[TMP12]], <2 x i32> [[TMP14]] +; THRESH-NEXT: [[TMP17:%.*]] = extractelement <2 x i32> [[TMP16]], i32 0 +; THRESH-NEXT: [[TMP18:%.*]] = extractelement <2 x i32> [[TMP16]], i32 1 +; THRESH-NEXT: [[OP_RDX4:%.*]] = icmp sgt i32 [[TMP17]], [[TMP18]] +; THRESH-NEXT: [[OP_RDX5:%.*]] = select i1 [[OP_RDX4]], i32 [[TMP17]], i32 [[TMP18]] +; THRESH-NEXT: [[TMP19:%.*]] = select i1 [[TMP5]], i32 3, i32 4 +; THRESH-NEXT: store i32 [[TMP19]], i32* @var, align 8 ; THRESH-NEXT: ret i32 [[OP_RDX5]] ; %2 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 0), align 16 @@ -1060,10 +1059,10 @@ ; AVX-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP6]]) ; AVX-NEXT: [[OP_RDX:%.*]] = icmp 
sgt i32 [[TMP7]], [[TMP8]] ; AVX-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i32 [[TMP7]], i32 [[TMP8]] -; AVX-NEXT: [[OP_RDX2:%.*]] = icmp sgt i32 [[OP_RDX1]], [[TMP5]] -; AVX-NEXT: [[OP_RDX3:%.*]] = select i1 [[OP_RDX2]], i32 [[OP_RDX1]], i32 [[TMP5]] -; AVX-NEXT: [[OP_RDX4:%.*]] = icmp sgt i32 [[TMP9]], [[OP_RDX3]] -; AVX-NEXT: [[OP_RDX5:%.*]] = select i1 [[OP_RDX4]], i32 [[TMP9]], i32 [[OP_RDX3]] +; AVX-NEXT: [[OP_RDX2:%.*]] = icmp sgt i32 [[TMP5]], [[TMP9]] +; AVX-NEXT: [[OP_RDX3:%.*]] = select i1 [[OP_RDX2]], i32 [[TMP5]], i32 [[TMP9]] +; AVX-NEXT: [[OP_RDX4:%.*]] = icmp sgt i32 [[OP_RDX1]], [[OP_RDX3]] +; AVX-NEXT: [[OP_RDX5:%.*]] = select i1 [[OP_RDX4]], i32 [[OP_RDX1]], i32 [[OP_RDX3]] ; AVX-NEXT: ret i32 [[OP_RDX5]] ; ; AVX2-LABEL: @maxi8_wrong_parent( @@ -1079,10 +1078,10 @@ ; AVX2-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP6]]) ; AVX2-NEXT: [[OP_RDX:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]] ; AVX2-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i32 [[TMP7]], i32 [[TMP8]] -; AVX2-NEXT: [[OP_RDX2:%.*]] = icmp sgt i32 [[OP_RDX1]], [[TMP5]] -; AVX2-NEXT: [[OP_RDX3:%.*]] = select i1 [[OP_RDX2]], i32 [[OP_RDX1]], i32 [[TMP5]] -; AVX2-NEXT: [[OP_RDX4:%.*]] = icmp sgt i32 [[TMP9]], [[OP_RDX3]] -; AVX2-NEXT: [[OP_RDX5:%.*]] = select i1 [[OP_RDX4]], i32 [[TMP9]], i32 [[OP_RDX3]] +; AVX2-NEXT: [[OP_RDX2:%.*]] = icmp sgt i32 [[TMP5]], [[TMP9]] +; AVX2-NEXT: [[OP_RDX3:%.*]] = select i1 [[OP_RDX2]], i32 [[TMP5]], i32 [[TMP9]] +; AVX2-NEXT: [[OP_RDX4:%.*]] = icmp sgt i32 [[OP_RDX1]], [[OP_RDX3]] +; AVX2-NEXT: [[OP_RDX5:%.*]] = select i1 [[OP_RDX4]], i32 [[OP_RDX1]], i32 [[OP_RDX3]] ; AVX2-NEXT: ret i32 [[OP_RDX5]] ; ; THRESH-LABEL: @maxi8_wrong_parent( @@ -1092,24 +1091,21 @@ ; THRESH-NEXT: [[TMP5:%.*]] = icmp sgt i32 [[TMP3]], [[TMP4]] ; THRESH-NEXT: br label [[PP:%.*]] ; THRESH: pp: -; THRESH-NEXT: [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 2) to <4 x i32>*), align 8 -; THRESH-NEXT: [[TMP7:%.*]] = load <2 x i32>, <2 x i32>* bitcast (i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 6) to <2 x i32>*), align 8 -; THRESH-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP6]]) -; THRESH-NEXT: [[TMP9:%.*]] = extractelement <2 x i32> [[TMP7]], i32 0 -; THRESH-NEXT: [[TMP10:%.*]] = extractelement <2 x i32> [[TMP7]], i32 1 -; THRESH-NEXT: [[OP_RDX:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] -; THRESH-NEXT: [[TMP11:%.*]] = insertelement <2 x i1> poison, i1 [[OP_RDX]], i32 0 -; THRESH-NEXT: [[TMP12:%.*]] = insertelement <2 x i1> [[TMP11]], i1 [[TMP5]], i32 1 -; THRESH-NEXT: [[TMP13:%.*]] = shufflevector <2 x i32> [[TMP7]], <2 x i32> [[TMP2]], <2 x i32> -; THRESH-NEXT: [[TMP14:%.*]] = shufflevector <2 x i32> [[TMP7]], <2 x i32> poison, <2 x i32> -; THRESH-NEXT: [[TMP15:%.*]] = shufflevector <2 x i32> [[TMP14]], <2 x i32> [[TMP2]], <2 x i32> -; THRESH-NEXT: [[TMP16:%.*]] = select <2 x i1> [[TMP12]], <2 x i32> [[TMP13]], <2 x i32> [[TMP15]] +; THRESH-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP3]], i32 [[TMP4]] +; THRESH-NEXT: [[TMP7:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 2) to <4 x i32>*), align 8 +; THRESH-NEXT: [[TMP8:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 6), align 8 +; THRESH-NEXT: [[TMP9:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 7), align 4 +; THRESH-NEXT: [[TMP10:%.*]] = call 
i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP7]]) +; THRESH-NEXT: [[TMP11:%.*]] = insertelement <2 x i32> poison, i32 [[TMP8]], i32 0 +; THRESH-NEXT: [[TMP12:%.*]] = insertelement <2 x i32> [[TMP11]], i32 [[TMP6]], i32 1 +; THRESH-NEXT: [[TMP13:%.*]] = insertelement <2 x i32> poison, i32 [[TMP9]], i32 0 +; THRESH-NEXT: [[TMP14:%.*]] = insertelement <2 x i32> [[TMP13]], i32 [[TMP10]], i32 1 +; THRESH-NEXT: [[TMP15:%.*]] = icmp sgt <2 x i32> [[TMP12]], [[TMP14]] +; THRESH-NEXT: [[TMP16:%.*]] = select <2 x i1> [[TMP15]], <2 x i32> [[TMP12]], <2 x i32> [[TMP14]] ; THRESH-NEXT: [[TMP17:%.*]] = extractelement <2 x i32> [[TMP16]], i32 0 ; THRESH-NEXT: [[TMP18:%.*]] = extractelement <2 x i32> [[TMP16]], i32 1 -; THRESH-NEXT: [[OP_RDX2:%.*]] = icmp sgt i32 [[TMP17]], [[TMP18]] -; THRESH-NEXT: [[OP_RDX3:%.*]] = select i1 [[OP_RDX2]], i32 [[TMP17]], i32 [[TMP18]] -; THRESH-NEXT: [[OP_RDX4:%.*]] = icmp sgt i32 [[TMP8]], [[OP_RDX3]] -; THRESH-NEXT: [[OP_RDX5:%.*]] = select i1 [[OP_RDX4]], i32 [[TMP8]], i32 [[OP_RDX3]] +; THRESH-NEXT: [[OP_RDX4:%.*]] = icmp sgt i32 [[TMP17]], [[TMP18]] +; THRESH-NEXT: [[OP_RDX5:%.*]] = select i1 [[OP_RDX4]], i32 [[TMP17]], i32 [[TMP18]] ; THRESH-NEXT: ret i32 [[OP_RDX5]] ; %2 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 0), align 16 @@ -1435,7 +1431,7 @@ ; AVX-NEXT: [[T12:%.*]] = sub nsw i32 undef, undef ; AVX-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> [[TMP2]]) ; AVX-NEXT: [[TMP4:%.*]] = call i32 @llvm.umin.i32(i32 [[T12]], i32 undef) -; AVX-NEXT: [[TMP5:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP3]], i32 [[TMP4]]) +; AVX-NEXT: [[TMP5:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP4]], i32 [[TMP3]]) ; AVX-NEXT: [[T14:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP5]], i32 93) ; AVX-NEXT: ret void ; @@ -1445,7 +1441,7 @@ ; AVX2-NEXT: [[T12:%.*]] = sub nsw i32 undef, undef ; AVX2-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> [[TMP2]]) ; AVX2-NEXT: [[TMP4:%.*]] = call i32 @llvm.umin.i32(i32 [[T12]], i32 undef) -; AVX2-NEXT: [[TMP5:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP3]], i32 [[TMP4]]) +; AVX2-NEXT: [[TMP5:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP4]], i32 [[TMP3]]) ; AVX2-NEXT: [[T14:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP5]], i32 93) ; AVX2-NEXT: ret void ; @@ -1455,7 +1451,7 @@ ; THRESH-NEXT: [[T12:%.*]] = sub nsw i32 undef, undef ; THRESH-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> [[TMP2]]) ; THRESH-NEXT: [[TMP4:%.*]] = call i32 @llvm.umin.i32(i32 [[T12]], i32 undef) -; THRESH-NEXT: [[TMP5:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP3]], i32 [[TMP4]]) +; THRESH-NEXT: [[TMP5:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP4]], i32 [[TMP3]]) ; THRESH-NEXT: [[T14:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP5]], i32 93) ; THRESH-NEXT: ret void ; diff --git a/llvm/test/Transforms/SLPVectorizer/X86/horizontal.ll b/llvm/test/Transforms/SLPVectorizer/X86/horizontal.ll --- a/llvm/test/Transforms/SLPVectorizer/X86/horizontal.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/horizontal.ll @@ -220,7 +220,7 @@ ; CHECK-NEXT: [[TMP7:%.*]] = load float, float* [[ARRAYIDX48]], align 4 ; CHECK-NEXT: [[MUL49:%.*]] = fmul fast float [[TMP2]], [[TMP7]] ; CHECK-NEXT: [[TMP8:%.*]] = call fast float @llvm.vector.reduce.fadd.v8f32(float -0.000000e+00, <8 x float> [[TMP6]]) -; CHECK-NEXT: [[OP_RDX:%.*]] = fadd fast float [[TMP8]], [[MUL49]] +; CHECK-NEXT: [[OP_RDX:%.*]] = fadd fast float [[MUL49]], [[TMP8]] ; CHECK-NEXT: [[ADD51]] = fadd fast float [[SUM_082]], [[OP_RDX]] ; CHECK-NEXT: [[INC]] = 
add nsw i64 [[I_083]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[TMP3]]
@@ -348,7 +348,7 @@
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x float>, <4 x float>* [[TMP3]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = fmul fast <4 x float> [[TMP1]], [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[TMP5]])
-; CHECK-NEXT: [[OP_RDX]] = fadd fast float [[TMP6]], [[SUM_042]]
+; CHECK-NEXT: [[OP_RDX]] = fadd fast float [[SUM_042]], [[TMP6]]
; CHECK-NEXT: [[INC]] = add nsw i64 [[I_043]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[TMP2]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]], label [[FOR_BODY]]
@@ -1192,10 +1192,17 @@
; CHECK-LABEL: @nsw_propagation_v4i32(
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([32 x i32]* @arr_i32 to <4 x i32>*), align 16
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP1]])
-; CHECK-NEXT: [[OP_RDX:%.*]] = add i32 [[TMP2]], [[START:%.*]]
+; CHECK-NEXT: [[OP_RDX:%.*]] = add i32 [[START:%.*]], [[TMP2]]
; CHECK-NEXT: store i32 [[OP_RDX]], i32* [[RES:%.*]], align 16
; CHECK-NEXT: ret void
;
+
+; STORE-LABEL: @nsw_propagation_v4i32(
+; STORE-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([32 x i32]* @arr_i32 to <4 x i32>*), align 16
+; STORE-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP1]])
+; STORE-NEXT: [[OP_RDX:%.*]] = add i32 [[START:%.*]], [[TMP2]]
+; STORE-NEXT: store i32 [[OP_RDX]], i32* [[RES:%.*]], align 16
+; STORE-NEXT: ret void
 %t0 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 0), align 16
 %t1 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 1), align 4
 %t2 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr_i32, i64 0, i64 2), align 8
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/malformed_phis.ll b/llvm/test/Transforms/SLPVectorizer/X86/malformed_phis.ll
--- a/llvm/test/Transforms/SLPVectorizer/X86/malformed_phis.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/malformed_phis.ll
@@ -16,7 +16,7 @@
; CHECK-NEXT: [[TMP0:%.*]] = insertelement <16 x i32> poison, i32 [[TMP]], i32 0
; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <16 x i32> [[TMP0]], <16 x i32> poison, <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.mul.v16i32(<16 x i32> [[SHUFFLE]])
-; CHECK-NEXT: [[OP_RDX]] = mul i32 [[TMP1]], undef
+; CHECK-NEXT: [[OP_RDX]] = mul i32 undef, [[TMP1]]
; CHECK-NEXT: br label [[BB1]]
;
bb:
@@ -56,7 +56,7 @@
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> undef)
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[SHUFFLE]])
; CHECK-NEXT: [[OP_RDX:%.*]] = add i32 [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[OP_RDX1:%.*]] = add i32 [[OP_RDX]], undef
+; CHECK-NEXT: [[OP_RDX1:%.*]] = add i32 undef, [[OP_RDX]]
; CHECK-NEXT: call void @use(i32 [[OP_RDX1]])
; CHECK-NEXT: br label [[BB2]]
;
@@ -155,7 +155,7 @@
; CHECK-NEXT: [[OP_RDX2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
; CHECK-NEXT: [[OP_RDX3:%.*]] = mul i32 [[TMP1]], [[TMP1]]
; CHECK-NEXT: [[OP_RDX4:%.*]] = mul i32 [[OP_RDX2]], [[OP_RDX3]]
-; CHECK-NEXT: [[OP_RDX5:%.*]] = mul i32 [[OP_RDX1]], [[OP_RDX4]]
+; CHECK-NEXT: [[OP_RDX5:%.*]] = mul i32 [[OP_RDX4]], [[OP_RDX1]]
; CHECK-NEXT: [[VAL64:%.*]] = add i32 undef, [[OP_RDX5]]
; CHECK-NEXT: [[VAL65:%.*]] = sext i32 [[VAL64]] to i64
; CHECK-NEXT: ret i64 [[VAL65]]
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reduction-logical.ll b/llvm/test/Transforms/SLPVectorizer/X86/reduction-logical.ll
--- a/llvm/test/Transforms/SLPVectorizer/X86/reduction-logical.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reduction-logical.ll
@@ -308,8 +308,8 @@
; CHECK-NEXT: [[TMP6:%.*]] = freeze <4 x i1> [[TMP2]]
; CHECK-NEXT: [[TMP7:%.*]] = call i1 @llvm.vector.reduce.and.v4i1(<4 x i1> [[TMP6]])
; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x i1> [[TMP1]], i32 3
-; CHECK-NEXT: [[OP_RDX:%.*]] = select i1 [[TMP8]], i1 [[S2]], i1 false
-; CHECK-NEXT: [[OP_RDX1:%.*]] = select i1 [[TMP7]], i1 [[OP_RDX]], i1 false
+; CHECK-NEXT: [[OP_RDX:%.*]] = select i1 [[TMP7]], i1 [[TMP8]], i1 false
+; CHECK-NEXT: [[OP_RDX1:%.*]] = select i1 [[S2]], i1 [[OP_RDX]], i1 false
; CHECK-NEXT: ret i1 [[OP_RDX1]]
;
%x0 = extractelement <4 x i32> %x, i32 0
@@ -398,10 +398,11 @@
; SSE-NEXT: [[TMP8:%.*]] = freeze <4 x i1> [[TMP7]]
; SSE-NEXT: [[TMP9:%.*]] = call i1 @llvm.vector.reduce.and.v4i1(<4 x i1> [[TMP8]])
; SSE-NEXT: [[TMP10:%.*]] = extractelement <2 x i1> [[TMP6]], i32 0
+; SSE-NEXT: [[OP_RDX:%.*]] = select i1 [[TMP9]], i1 [[TMP10]], i1 false
; SSE-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP6]], i32 1
-; SSE-NEXT: [[OP_RDX:%.*]] = select i1 [[TMP10]], i1 [[TMP11]], i1 false
-; SSE-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i1 [[C2]], i1 false
-; SSE-NEXT: [[OP_RDX2:%.*]] = select i1 [[TMP9]], i1 [[OP_RDX1]], i1 false
+; SSE-NEXT: [[OP_RDX1:%.*]] = select i1 [[TMP11]], i1 [[C2]], i1 false
+; SSE-NEXT: [[TMP12:%.*]] = freeze i1 [[OP_RDX]]
+; SSE-NEXT: [[OP_RDX2:%.*]] = select i1 [[TMP12]], i1 [[OP_RDX1]], i1 false
; SSE-NEXT: ret i1 [[OP_RDX2]]
;
; AVX-LABEL: @logical_and_icmp_clamp_partial(
@@ -414,9 +415,10 @@
; AVX-NEXT: [[TMP4:%.*]] = icmp sgt <4 x i32> [[X]],
; AVX-NEXT: [[TMP5:%.*]] = freeze <4 x i1> [[TMP4]]
; AVX-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.and.v4i1(<4 x i1> [[TMP5]])
-; AVX-NEXT: [[OP_RDX:%.*]] = select i1 [[C1]], i1 [[C0]], i1 false
-; AVX-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i1 [[C2]], i1 false
-; AVX-NEXT: [[OP_RDX2:%.*]] = select i1 [[TMP6]], i1 [[OP_RDX1]], i1 false
+; AVX-NEXT: [[OP_RDX:%.*]] = select i1 [[TMP6]], i1 [[C1]], i1 false
+; AVX-NEXT: [[OP_RDX1:%.*]] = select i1 [[C0]], i1 [[C2]], i1 false
+; AVX-NEXT: [[TMP7:%.*]] = freeze i1 [[OP_RDX]]
+; AVX-NEXT: [[OP_RDX2:%.*]] = select i1 [[TMP7]], i1 [[OP_RDX1]], i1 false
; AVX-NEXT: ret i1 [[OP_RDX2]]
;
%x0 = extractelement <4 x i32> %x, i32 0
@@ -539,9 +541,11 @@
; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt <4 x i32> [[X:%.*]],
; CHECK-NEXT: [[TMP2:%.*]] = freeze <4 x i1> [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = call i1 @llvm.vector.reduce.and.v4i1(<4 x i1> [[TMP2]])
-; CHECK-NEXT: [[OP_RDX:%.*]] = select i1 [[C0:%.*]], i1 [[C1:%.*]], i1 false
-; CHECK-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i1 [[C2:%.*]], i1 false
-; CHECK-NEXT: [[OP_RDX2:%.*]] = select i1 [[TMP3]], i1 [[OP_RDX1]], i1 false
+; CHECK-NEXT: [[OP_RDX:%.*]] = select i1 [[TMP3]], i1 [[C0:%.*]], i1 false
+; CHECK-NEXT: [[TMP4:%.*]] = freeze i1 [[C1:%.*]]
+; CHECK-NEXT: [[OP_RDX1:%.*]] = select i1 [[TMP4]], i1 [[C2:%.*]], i1 false
+; CHECK-NEXT: [[TMP5:%.*]] = freeze i1 [[OP_RDX]]
+; CHECK-NEXT: [[OP_RDX2:%.*]] = select i1 [[TMP5]], i1 [[OP_RDX1]], i1 false
; CHECK-NEXT: ret i1 [[OP_RDX2]]
;
%x0 = extractelement <4 x i32> %x, i32 0
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reduction-transpose.ll b/llvm/test/Transforms/SLPVectorizer/X86/reduction-transpose.ll
--- a/llvm/test/Transforms/SLPVectorizer/X86/reduction-transpose.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reduction-transpose.ll
@@ -53,7 +53,7 @@
; SSE2-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP15]])
; SSE2-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP7]])
; SSE2-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP16]], [[TMP17]]
-; SSE2-NEXT: [[OP_RDX1:%.*]] = and i32 [[OP_RDX]], [[ACC:%.*]]
+; SSE2-NEXT: [[OP_RDX1:%.*]] = and i32 [[ACC:%.*]], [[OP_RDX]]
; SSE2-NEXT: ret i32 [[OP_RDX1]]
;
; SSE42-LABEL: @reduce_and4(
@@ -65,7 +65,7 @@
; SSE42-NEXT: [[OP_RDX1:%.*]] = and i32 [[OP_RDX]], [[TMP2]]
; SSE42-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[V1:%.*]])
; SSE42-NEXT: [[OP_RDX2:%.*]] = and i32 [[OP_RDX1]], [[TMP3]]
-; SSE42-NEXT: [[OP_RDX3:%.*]] = and i32 [[OP_RDX2]], [[ACC:%.*]]
+; SSE42-NEXT: [[OP_RDX3:%.*]] = and i32 [[ACC:%.*]], [[OP_RDX2]]
; SSE42-NEXT: ret i32 [[OP_RDX3]]
;
; AVX-LABEL: @reduce_and4(
@@ -105,7 +105,7 @@
; AVX-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP15]])
; AVX-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP7]])
; AVX-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP16]], [[TMP17]]
-; AVX-NEXT: [[OP_RDX1:%.*]] = and i32 [[OP_RDX]], [[ACC:%.*]]
+; AVX-NEXT: [[OP_RDX1:%.*]] = and i32 [[ACC:%.*]], [[OP_RDX]]
; AVX-NEXT: ret i32 [[OP_RDX1]]
;
entry:
@@ -189,7 +189,7 @@
; SSE2-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP16]])
; SSE2-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP8]])
; SSE2-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP17]], [[TMP18]]
-; SSE2-NEXT: [[OP_RDX1:%.*]] = and i32 [[OP_RDX]], [[ACC:%.*]]
+; SSE2-NEXT: [[OP_RDX1:%.*]] = and i32 [[ACC:%.*]], [[OP_RDX]]
; SSE2-NEXT: ret i32 [[OP_RDX1]]
;
; SSE42-LABEL: @reduce_and4_transpose(
@@ -200,7 +200,7 @@
; SSE42-NEXT: [[OP_RDX1:%.*]] = and i32 [[OP_RDX]], [[TMP3]]
; SSE42-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[V1:%.*]])
; SSE42-NEXT: [[OP_RDX2:%.*]] = and i32 [[OP_RDX1]], [[TMP4]]
-; SSE42-NEXT: [[OP_RDX3:%.*]] = and i32 [[OP_RDX2]], [[ACC:%.*]]
+; SSE42-NEXT: [[OP_RDX3:%.*]] = and i32 [[ACC:%.*]], [[OP_RDX2]]
; SSE42-NEXT: ret i32 [[OP_RDX3]]
;
; AVX-LABEL: @reduce_and4_transpose(
@@ -239,7 +239,7 @@
; AVX-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP16]])
; AVX-NEXT: [[TMP18:%.*]] = call i32 @llvm.vector.reduce.and.v8i32(<8 x i32> [[TMP8]])
; AVX-NEXT: [[OP_RDX:%.*]] = and i32 [[TMP17]], [[TMP18]]
-; AVX-NEXT: [[OP_RDX1:%.*]] = and i32 [[OP_RDX]], [[ACC:%.*]]
+; AVX-NEXT: [[OP_RDX1:%.*]] = and i32 [[ACC:%.*]], [[OP_RDX]]
; AVX-NEXT: ret i32 [[OP_RDX1]]
;
%vecext = extractelement <4 x i32> %v1, i64 0
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reduction_loads.ll b/llvm/test/Transforms/SLPVectorizer/X86/reduction_loads.ll
--- a/llvm/test/Transforms/SLPVectorizer/X86/reduction_loads.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reduction_loads.ll
@@ -29,7 +29,7 @@
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP0]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = mul <8 x i32> [[TMP1]],
; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP2]])
-; CHECK-NEXT: [[OP_RDX]] = add i32 [[TMP3]], [[SUM]]
+; CHECK-NEXT: [[OP_RDX]] = add i32 [[SUM]], [[TMP3]]
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret i32 [[OP_RDX]]
@@ -104,7 +104,7 @@
; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, <8 x i32>* [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = mul <8 x i32> [[TMP1]], [[TMP3]]
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP4]])
-; CHECK-NEXT: [[OP_RDX]] = add i32 [[TMP5]], [[SUM]]
+; CHECK-NEXT: [[OP_RDX]] = add i32 [[SUM]], [[TMP5]]
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret i32 [[OP_RDX]]
@@ -196,7 +196,7 @@
; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <8 x i32> [[TMP3]], <8 x i32> poison, <8 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = mul <8 x i32> [[TMP1]], [[SHUFFLE]]
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP4]])
-; CHECK-NEXT: [[OP_RDX]] = add i32 [[TMP5]], [[SUM]]
+; CHECK-NEXT: [[OP_RDX]] = add i32 [[SUM]], [[TMP5]]
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret i32 [[OP_RDX]]
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/remark_horcost.ll b/llvm/test/Transforms/SLPVectorizer/X86/remark_horcost.ll
--- a/llvm/test/Transforms/SLPVectorizer/X86/remark_horcost.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/remark_horcost.ll
@@ -24,7 +24,7 @@
; CHECK-NEXT: [[TMP8:%.*]] = bitcast i32* [[ARRAYIDX6]] to <4 x i32>*
; CHECK-NEXT: store <4 x i32> [[TMP7]], <4 x i32>* [[TMP8]], align 16
; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP7]])
-; CHECK-NEXT: [[OP_RDX]] = add i32 [[TMP9]], [[A_088]]
+; CHECK-NEXT: [[OP_RDX]] = add i32 [[A_088]], [[TMP9]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 8
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reorder_repeated_ops.ll b/llvm/test/Transforms/SLPVectorizer/X86/reorder_repeated_ops.ll
--- a/llvm/test/Transforms/SLPVectorizer/X86/reorder_repeated_ops.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reorder_repeated_ops.ll
@@ -28,8 +28,8 @@
; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[TMP8]])
; CHECK-NEXT: [[OP_RDX:%.*]] = icmp slt i32 [[TMP9]], [[TMP10]]
; CHECK-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i32 [[TMP9]], i32 [[TMP10]]
-; CHECK-NEXT: [[OP_RDX2:%.*]] = icmp slt i32 [[OP_RDX1]], undef
-; CHECK-NEXT: [[OP_RDX3:%.*]] = select i1 [[OP_RDX2]], i32 [[OP_RDX1]], i32 undef
+; CHECK-NEXT: [[OP_RDX2:%.*]] = icmp slt i32 undef, [[OP_RDX1]]
+; CHECK-NEXT: [[OP_RDX3:%.*]] = select i1 [[OP_RDX2]], i32 undef, i32 [[OP_RDX1]]
; CHECK-NEXT: [[T45:%.*]] = icmp sgt i32 undef, [[OP_RDX3]]
; CHECK-NEXT: unreachable
;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/revectorized_rdx_crash.ll b/llvm/test/Transforms/SLPVectorizer/X86/revectorized_rdx_crash.ll
--- a/llvm/test/Transforms/SLPVectorizer/X86/revectorized_rdx_crash.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/revectorized_rdx_crash.ll
@@ -22,17 +22,18 @@
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[I]] to <4 x i32>*
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 8
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP1]])
-; CHECK-NEXT: [[OP_RDX6:%.*]] = add i32 [[TMP2]], undef
+; CHECK-NEXT: [[OP_RDX7:%.*]] = add i32 undef, [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[I1]] to <4 x i32>*
; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP4]])
-; CHECK-NEXT: [[OP_RDX5:%.*]] = add i32 [[TMP5]], undef
+; CHECK-NEXT: [[OP_RDX5:%.*]] = add i32 undef, [[TMP5]]
+; CHECK-NEXT: [[OP_RDX6:%.*]] = add i32 undef, [[OP_RDX5]]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> undef)
-; CHECK-NEXT: [[OP_RDX:%.*]] = add i32 undef, [[OP_RDX6]]
-; CHECK-NEXT: [[OP_RDX1:%.*]] = add i32 [[OP_RDX6]], [[OP_RDX5]]
-; CHECK-NEXT: [[OP_RDX2:%.*]] = add i32 [[OP_RDX]], [[OP_RDX1]]
-; CHECK-NEXT: [[OP_RDX3:%.*]] = add i32 [[OP_RDX2]], [[OP_RDX5]]
-; CHECK-NEXT: [[OP_RDX4:%.*]] = add i32 [[TMP6]], [[OP_RDX3]]
+; CHECK-NEXT: [[OP_RDX:%.*]] = add i32 undef, [[OP_RDX7]]
+; CHECK-NEXT: [[OP_RDX1:%.*]] = add i32 [[OP_RDX7]], [[OP_RDX6]]
+; CHECK-NEXT: [[OP_RDX2:%.*]] = add i32 [[OP_RDX6]], [[TMP6]]
+; CHECK-NEXT: [[OP_RDX3:%.*]] = add i32 [[OP_RDX]], [[OP_RDX1]]
+; CHECK-NEXT: [[OP_RDX4:%.*]] = add i32 [[OP_RDX3]], [[OP_RDX2]]
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
; CHECK-NEXT: [[R:%.*]] = phi i32 [ [[OP_RDX4]], [[FOR_COND_PREHEADER]] ], [ undef, [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/scheduling.ll b/llvm/test/Transforms/SLPVectorizer/X86/scheduling.ll
--- a/llvm/test/Transforms/SLPVectorizer/X86/scheduling.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/scheduling.ll
@@ -23,7 +23,7 @@
; CHECK-NEXT: [[TMP8:%.*]] = bitcast i32* [[ARRAYIDX6]] to <4 x i32>*
; CHECK-NEXT: store <4 x i32> [[TMP7]], <4 x i32>* [[TMP8]], align 16
; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP7]])
-; CHECK-NEXT: [[OP_RDX]] = add i32 [[TMP9]], [[A_088]]
+; CHECK-NEXT: [[OP_RDX]] = add i32 [[A_088]], [[TMP9]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 8
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/slp-fma-loss.ll b/llvm/test/Transforms/SLPVectorizer/X86/slp-fma-loss.ll
--- a/llvm/test/Transforms/SLPVectorizer/X86/slp-fma-loss.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/slp-fma-loss.ll
@@ -16,7 +16,7 @@
; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x double> , double [[CVT0]], i32 0
; CHECK-NEXT: [[TMP2:%.*]] = fmul fast <4 x double> zeroinitializer, [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double -0.000000e+00, <4 x double> [[TMP2]])
-; CHECK-NEXT: [[OP_RDX]] = fadd fast double [[TMP3]], [[PHI0]]
+; CHECK-NEXT: [[OP_RDX]] = fadd fast double [[PHI0]], [[TMP3]]
; CHECK-NEXT: br i1 true, label [[EXIT:%.*]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
@@ -55,7 +55,7 @@
; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <4 x double> [[TMP1]], <4 x double> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP2:%.*]] = fmul fast <4 x double> , [[SHUFFLE]]
; CHECK-NEXT: [[TMP3:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double -0.000000e+00, <4 x double> [[TMP2]])
-; CHECK-NEXT: [[OP_RDX:%.*]] = fadd fast double [[TMP3]], [[CVT0]]
+; CHECK-NEXT: [[OP_RDX:%.*]] = fadd fast double [[CVT0]], [[TMP3]]
; CHECK-NEXT: ret double [[OP_RDX]]
;
%cvt0 = uitofp i16 3 to double
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/undef_vect.ll b/llvm/test/Transforms/SLPVectorizer/X86/undef_vect.ll
--- a/llvm/test/Transforms/SLPVectorizer/X86/undef_vect.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/undef_vect.ll
@@ -10,8 +10,8 @@
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[DOTSROA_CAST_4]] to <8 x i32>*
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP0]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> [[TMP1]])
-; CHECK-NEXT: [[OP_RDX:%.*]] = icmp sgt i32 [[TMP2]], undef
-; CHECK-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i32 [[TMP2]], i32 undef
+; CHECK-NEXT: [[OP_RDX:%.*]] = icmp sgt i32 undef, [[TMP2]]
+; CHECK-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i32 undef, i32 [[TMP2]]
; CHECK-NEXT: [[DOTSROA_SPECULATED_9:%.*]] = select i1 undef, i32 undef, i32 [[OP_RDX1]]
; CHECK-NEXT: [[CMP_I1_10:%.*]] = icmp slt i32 [[DOTSROA_SPECULATED_9]], undef
; CHECK-NEXT: ret void
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/used-reduced-op.ll b/llvm/test/Transforms/SLPVectorizer/X86/used-reduced-op.ll
--- a/llvm/test/Transforms/SLPVectorizer/X86/used-reduced-op.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/used-reduced-op.ll
@@ -35,8 +35,8 @@
; CHECK-NEXT: [[TMP19:%.*]] = sub nsw <4 x i32> zeroinitializer, [[TMP17]]
; CHECK-NEXT: [[TMP20:%.*]] = select <4 x i1> [[TMP18]], <4 x i32> [[TMP19]], <4 x i32> [[TMP17]]
; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[TMP20]])
-; CHECK-NEXT: [[OP_RDX:%.*]] = icmp slt i32 [[TMP21]], [[B_0]]
-; CHECK-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i32 [[TMP21]], i32 [[B_0]]
+; CHECK-NEXT: [[OP_RDX:%.*]] = icmp slt i32 [[B_0]], [[TMP21]]
+; CHECK-NEXT: [[OP_RDX1:%.*]] = select i1 [[OP_RDX]], i32 [[B_0]], i32 [[TMP21]]
; CHECK-NEXT: [[SUB_116:%.*]] = sub i32 [[TMP15]], [[TMP1]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp slt i32 [[SUB_116]], 0
; CHECK-NEXT: [[NEG_117:%.*]] = sub nsw i32 0, [[SUB_116]]
diff --git a/llvm/test/Transforms/SLPVectorizer/slp-umax-rdx-matcher-crash.ll b/llvm/test/Transforms/SLPVectorizer/slp-umax-rdx-matcher-crash.ll
--- a/llvm/test/Transforms/SLPVectorizer/slp-umax-rdx-matcher-crash.ll
+++ b/llvm/test/Transforms/SLPVectorizer/slp-umax-rdx-matcher-crash.ll
@@ -46,7 +46,7 @@
; CHECK-NEXT: [[TMP0:%.*]] = call <4 x i32> @llvm.smin.v4i32(<4 x i32> undef, <4 x i32> )
; CHECK-NEXT: [[TMP1:%.*]] = sub nsw <4 x i32> undef, [[TMP0]]
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> [[TMP1]])
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.umin.i32(i32 [[TMP2]], i32 77)
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @llvm.umin.i32(i32 77, i32 [[TMP2]])
; CHECK-NEXT: [[E:%.*]] = icmp ugt i32 [[TMP3]], 1
; CHECK-NEXT: ret void
;