Index: lib/Analysis/ScalarEvolution.cpp
===================================================================
--- lib/Analysis/ScalarEvolution.cpp
+++ lib/Analysis/ScalarEvolution.cpp
@@ -1559,6 +1559,56 @@
   return false;
 }
 
+// Finds an integer D for an expression (C + x + y + ...) such that the top
+// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
+// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
+// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
+// the (C + x + y + ...) expression is \p WholeAddExpr.
+static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
+                                            const SCEVConstant *ConstantTerm,
+                                            const SCEVAddExpr *WholeAddExpr) {
+  // ConstantRange is unable to prove that it's possible to transform
+  // an expression like (5 + (4 * X)) to (1 + (4 + (4 * X))) w/o underflowing:
+  //
+  // | Expression   |  ConstantRange          |       KnownBits        |
+  // |--------------|-------------------------|------------------------|
+  // | i8 4 * X     |  [L: 0, U: 253)         |    XXXX XX00           |
+  // |              |    => Min: 0, Max: 252  |    => Min: 0, Max: 252 |
+  // |              |                         |                        |
+  // | i8 5 + 4 * X |  [L: 5, U: 2) (wrapped) |    YYYY YY01           |
+  // |  (101)       |    => Min: 0, Max: 255  |    => Min: 1, Max: 253 |
+  //
+  // As KnownBits are unavailable for SCEV expressions, use the number of
+  // trailing zeroes instead:
+  const APInt C = ConstantTerm->getAPInt();
+  const unsigned BitWidth = C.getBitWidth();
+  // Find the number of trailing zeros of (x + y + ...) w/o the C first:
+  uint32_t TZ = BitWidth;
+  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
+    TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
+  if (TZ) {
+    // Set D to be as many least significant bits of C as possible while still
+    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
+    return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
+  }
+  return APInt(BitWidth, 0);
+}
+
+// Finds an integer D for an affine AddRec expression {C,+,x} such that the top
+// level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
+// number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
+// ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
+static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
+                                            const APInt &ConstantStart,
+                                            const SCEV *Step) {
+  const unsigned BitWidth = ConstantStart.getBitWidth();
+  const uint32_t TZ = SE.GetMinTrailingZeros(Step);
+  if (TZ)
+    return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
+                         : ConstantStart;
+  return APInt(BitWidth, 0);
+}
+
 const SCEV *
 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
   assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
@@ -1745,6 +1795,23 @@
         }
       }
 
+      // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))
+      // if D + (C - D + Step * n) could be proven to not unsigned wrap
+      // where D maximizes the number of trailing zeros of (C - D + Step * n)
+      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
+        const APInt &C = SC->getAPInt();
+        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
+        if (D != 0) {
+          const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
+          const SCEV *SResidual =
+              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
+          const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
+          return getAddExpr(SZExtD, SZExtR,
+                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
+                            Depth + 1);
+        }
+      }
+
       if (proveNoWrapByVaryingStart(Start, Step, L)) {
         const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
         return getAddRecExpr(
@@ -1777,6 +1844,27 @@
         Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
       return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
     }
+
+    // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
+    // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
+    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
+    //
+    // Often address arithmetic contains expressions like
+    // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
+    // This transformation is useful while proving that such expressions are
+    // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
+    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
+      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
+      if (D != 0) {
+        const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
+        const SCEV *SResidual =
+            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
+        const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
+        return getAddExpr(SZExtD, SZExtR,
+                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
+                          Depth + 1);
+      }
+    }
   }
 
   if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
@@ -1878,24 +1966,7 @@
     return getTruncateOrSignExtend(X, Ty);
   }
 
-  // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if C1 < C2
   if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
-    if (SA->getNumOperands() == 2) {
-      auto *SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0));
-      auto *SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1));
-      if (SMul && SC1) {
-        if (auto *SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) {
-          const APInt &C1 = SC1->getAPInt();
-          const APInt &C2 = SC2->getAPInt();
-          if (C1.isStrictlyPositive() && C2.isStrictlyPositive() &&
-              C2.ugt(C1) && C2.isPowerOf2())
-            return getAddExpr(getSignExtendExpr(SC1, Ty, Depth + 1),
-                              getSignExtendExpr(SMul, Ty, Depth + 1),
-                              SCEV::FlagAnyWrap, Depth + 1);
-        }
-      }
-    }
-
     // sext((A + B + ...)) --> (sext(A) + sext(B) + ...)
     if (SA->hasNoSignedWrap()) {
       // If the addition does not sign overflow then we can, by definition,
@@ -1905,6 +1976,28 @@
         Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
       return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
     }
+
+    // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
+    // if D + (C - D + x + y + ...) could be proven to not signed wrap
+    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
+    //
+    // For instance, this will bring two seemingly different expressions:
+    //     1 + sext(5 + 20 * %x + 24 * %y)  and
+    //         sext(6 + 20 * %x + 24 * %y)
+    // to the same form:
+    //     2 + sext(4 + 20 * %x + 24 * %y)
+    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
+      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
+      if (D != 0) {
+        const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
+        const SCEV *SResidual =
+            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
+        const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
+        return getAddExpr(SSExtD, SSExtR,
+                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
+                          Depth + 1);
+      }
+    }
   }
 
   // If the input value is a chrec scev, and we can prove that the value
   // did not overflow the old, smaller, value, we can sign extend all of the
@@ -2034,21 +2127,20 @@
       }
     }
 
-    // If Start and Step are constants, check if we can apply this
-    // transformation:
-    // sext{C1,+,C2} --> C1 + sext{0,+,C2} if C1 < C2
-    auto *SC1 = dyn_cast<SCEVConstant>(Start);
-    auto *SC2 = dyn_cast<SCEVConstant>(Step);
-    if (SC1 && SC2) {
-      const APInt &C1 = SC1->getAPInt();
-      const APInt &C2 = SC2->getAPInt();
-      if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) &&
-          C2.isPowerOf2()) {
-        Start = getSignExtendExpr(Start, Ty, Depth + 1);
-        const SCEV *NewAR = getAddRecExpr(getZero(AR->getType()), Step, L,
-                                          AR->getNoWrapFlags());
-        return getAddExpr(Start, getSignExtendExpr(NewAR, Ty, Depth + 1),
-                          SCEV::FlagAnyWrap, Depth + 1);
+    // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))
+    // if D + (C - D + Step * n) could be proven to not signed wrap
+    // where D maximizes the number of trailing zeros of (C - D + Step * n)
+    if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
+      const APInt &C = SC->getAPInt();
+      const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
+      if (D != 0) {
+        const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
+        const SCEV *SResidual =
+            getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
+        const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
+        return getAddExpr(SSExtD, SSExtR,
+                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
+                          Depth + 1);
       }
     }
Index: test/Analysis/ScalarEvolution/no-wrap-add-exprs.ll
===================================================================
--- test/Analysis/ScalarEvolution/no-wrap-add-exprs.ll
+++ test/Analysis/ScalarEvolution/no-wrap-add-exprs.ll
@@ -120,3 +120,84 @@
 
   ret void
 }
+
+@z_addr = external global [16 x i8], align 4
+@z_addr_noalign = external global [16 x i8]
+
+%union = type { [10 x [4 x float]] }
+@tmp_addr = external unnamed_addr global { %union, [2000 x i8] }
+
+define void @f3(i8* %x_addr, i8* %y_addr, i32* %tmp_addr) {
+; CHECK-LABEL: Classifying expressions for: @f3
+ entry:
+  %x = load i8, i8* %x_addr
+  %t0 = mul i8 %x, 4
+  %t1 = add i8 %t0, 5
+  %t1.zext = zext i8 %t1 to i16
+; CHECK: %t1.zext = zext i8 %t1 to i16
+; CHECK-NEXT: --> (1 + (zext i8 (4 + (4 * %x)) to i16)) U: [1,254) S: [1,257)
+
+  %q0 = mul i8 %x, 4
+  %q1 = add i8 %q0, 7
+  %q1.zext = zext i8 %q1 to i16
+; CHECK: %q1.zext = zext i8 %q1 to i16
+; CHECK-NEXT: --> (3 + (zext i8 (4 + (4 * %x)) to i16)) U: [3,256) S: [3,259)
+
+  %p0 = mul i8 %x, 4
+  %p1 = add i8 %p0, 8
+  %p1.zext = zext i8 %p1 to i16
+; CHECK: %p1.zext = zext i8 %p1 to i16
+; CHECK-NEXT: --> (zext i8 (8 + (4 * %x)) to i16) U: [0,253) S: [0,256)
+
+  %r0 = mul i8 %x, 4
+  %r1 = add i8 %r0, 254
+  %r1.zext = zext i8 %r1 to i16
+; CHECK: %r1.zext = zext i8 %r1 to i16
+; CHECK-NEXT: --> (2 + (zext i8 (-4 + (4 * %x)) to i16)) U: [2,255) S: [2,258)
+
+  %y = load i8, i8* %y_addr
+  %s0 = mul i8 %x, 32
+  %s1 = mul i8 %y, 36
+  %s2 = add i8 %s0, %s1
+  %s3 = add i8 %s2, 5
+  %s3.zext = zext i8 %s3 to i16
+; CHECK: %s3.zext = zext i8 %s3 to i16
+; CHECK-NEXT: --> (1 + (zext i8 (4 + (32 * %x) + (36 * %y)) to i16)) U: [1,254) S: [1,257)
+
+  %ptr = bitcast [16 x i8]* @z_addr to i8*
+  %int0 = ptrtoint i8* %ptr to i32
+  %int5 = add i32 %int0, 5
+  %int.zext = zext i32 %int5 to i64
+; CHECK: %int.zext = zext i32 %int5 to i64
+; CHECK-NEXT: --> (1 + (zext i32 (4 + %int0) to i64)) U: [1,4294967294) S: [1,4294967297)
+
+  %ptr_noalign = bitcast [16 x i8]* @z_addr_noalign to i8*
+  %int0_na = ptrtoint i8* %ptr_noalign to i32
+  %int5_na = add i32 %int0_na, 5
+  %int.zext_na = zext i32 %int5_na to i64
+; CHECK: %int.zext_na = zext i32 %int5_na to i64
+; CHECK-NEXT: --> (zext i32 (5 + %int0_na) to i64) U: [0,4294967296) S: [0,4294967296)
+
+  %tmp = load i32, i32* %tmp_addr
+  %mul = and i32 %tmp, -4
+  %add4 = add i32 %mul, 4
+  %add4.zext = zext i32 %add4 to i64
+  %sunkaddr3 = mul i64 %add4.zext, 4
+  %sunkaddr4 = getelementptr inbounds i8, i8* bitcast ({ %union, [2000 x i8] }* @tmp_addr to i8*), i64 %sunkaddr3
+  %sunkaddr5 = getelementptr inbounds i8, i8* %sunkaddr4, i64 4096
+  %addr4.cast = bitcast i8* %sunkaddr5 to i32*
+  %addr4.incr = getelementptr i32, i32* %addr4.cast, i64 1
+; CHECK: %addr4.incr = getelementptr i32, i32* %addr4.cast, i64 1
+; CHECK-NEXT: --> ([[C:4100]] + ([[SIZE:4]] * (zext i32 ([[OFFSET:4]] + ([[STRIDE:4]] * (%tmp /u [[STRIDE]]))) to i64)) + @tmp_addr)
+
+  %add5 = add i32 %mul, 5
+  %add5.zext = zext i32 %add5 to i64
+  %sunkaddr0 = mul i64 %add5.zext, 4
+  %sunkaddr1 = getelementptr inbounds i8, i8* bitcast ({ %union, [2000 x i8] }* @tmp_addr to i8*), i64 %sunkaddr0
+  %sunkaddr2 = getelementptr inbounds i8, i8* %sunkaddr1, i64 4096
+  %addr5.cast = bitcast i8* %sunkaddr2 to i32*
+; CHECK: %addr5.cast = bitcast i8* %sunkaddr2 to i32*
+; CHECK-NEXT: --> ([[C]] + ([[SIZE]] * (zext i32 ([[OFFSET]] + ([[STRIDE]] * (%tmp /u [[STRIDE]]))) to i64)) + @tmp_addr)
+
+  ret void
+}
Index: test/Transforms/IndVarSimplify/shrunk-constant.ll
===================================================================
--- test/Transforms/IndVarSimplify/shrunk-constant.ll
+++ test/Transforms/IndVarSimplify/shrunk-constant.ll
@@ -1,6 +1,6 @@
 ; RUN: opt < %s -scalar-evolution -analyze | FileCheck %s
 
-; CHECK: --> (zext i4 {-7,+,-8}<%loop> to i32)
+; CHECK: --> (1 + (zext i4 {-8,+,-8}<%loop> to i32))
 
 define fastcc void @foo() nounwind {
 entry:
Index: test/Transforms/LoadStoreVectorizer/X86/codegenprepare-produced-address-math.ll
===================================================================
--- /dev/null
+++ test/Transforms/LoadStoreVectorizer/X86/codegenprepare-produced-address-math.ll
@@ -0,0 +1,78 @@
+; RUN: opt -codegenprepare -load-store-vectorizer %s -S -o - | FileCheck %s
+; RUN: opt -load-store-vectorizer %s -S -o - | FileCheck %s
+
+target triple = "x86_64--"
+
+%union = type { { [4 x [4 x [4 x [16 x float]]]], [4 x [4 x [4 x [16 x float]]]], [10 x [10 x [4 x float]]] } }
+
+@global_pointer = external unnamed_addr global { %union, [2000 x i8] }, align 4
+
+; Function Attrs: convergent nounwind
+define void @test(i32 %base) #0 {
+; CHECK-LABEL: @test(
+; CHECK-NOT: load i32
+; CHECK: load <2 x i32>
+; CHECK-NOT: load i32
+entry:
+  %mul331 = and i32 %base, -4
+  %add350.4 = add i32 4, %mul331
+  %idx351.4 = zext i32 %add350.4 to i64
+  %arrayidx352.4 = getelementptr inbounds { %union, [2000 x i8] }, { %union, [2000 x i8] }* @global_pointer, i64 0, i32 0, i32 0, i32 1, i64 0, i64 0, i64 0, i64 %idx351.4
+  %tmp296.4 = bitcast float* %arrayidx352.4 to i32*
+  %add350.5 = add i32 5, %mul331
+  %idx351.5 = zext i32 %add350.5 to i64
+  %arrayidx352.5 = getelementptr inbounds { %union, [2000 x i8] }, { %union, [2000 x i8] }* @global_pointer, i64 0, i32 0, i32 0, i32 1, i64 0, i64 0, i64 0, i64 %idx351.5
+  %tmp296.5 = bitcast float* %arrayidx352.5 to i32*
+  %cnd = icmp ult i32 %base, 1000
+  br i1 %cnd, label %loads, label %exit
+
+loads:
+  ; If and only if the loads are in a different BB from the GEPs, codegenprepare
+  ; would try to turn the GEPs into math, which makes LoadStoreVectorizer's job
+  ; harder
+  %tmp297.4 = load i32, i32* %tmp296.4, align 4, !tbaa !0
+  %tmp297.5 = load i32, i32* %tmp296.5, align 4, !tbaa !0
+  br label %exit
+
+exit:
+  ret void
+}
+
+; Function Attrs: convergent nounwind
+define void @test.codegenprepared(i32 %base) #0 {
+; CHECK-LABEL: @test.codegenprepared(
+; CHECK-NOT: load i32
+; CHECK: load <2 x i32>
+; CHECK-NOT: load i32
+entry:
+  %mul331 = and i32 %base, -4
+  %add350.4 = add i32 4, %mul331
+  %idx351.4 = zext i32 %add350.4 to i64
+  %add350.5 = add i32 5, %mul331
+  %idx351.5 = zext i32 %add350.5 to i64
+  %cnd = icmp ult i32 %base, 1000
+  br i1 %cnd, label %loads, label %exit
+
+loads:                                            ; preds = %entry
+  %sunkaddr = mul i64 %idx351.4, 4
+  %sunkaddr1 = getelementptr inbounds i8, i8* bitcast ({ %union, [2000 x i8] }* @global_pointer to i8*), i64 %sunkaddr
+  %sunkaddr2 = getelementptr inbounds i8, i8* %sunkaddr1, i64 4096
+  %0 = bitcast i8* %sunkaddr2 to i32*
+  %tmp297.4 = load i32, i32* %0, align 4, !tbaa !0
+  %sunkaddr3 = mul i64 %idx351.5, 4
+  %sunkaddr4 = getelementptr inbounds i8, i8* bitcast ({ %union, [2000 x i8] }* @global_pointer to i8*), i64 %sunkaddr3
+  %sunkaddr5 = getelementptr inbounds i8, i8* %sunkaddr4, i64 4096
+  %1 = bitcast i8* %sunkaddr5 to i32*
+  %tmp297.5 = load i32, i32* %1, align 4, !tbaa !0
+  br label %exit
+
+exit:                                             ; preds = %loads, %entry
+  ret void
+}
+
+attributes #0 = { convergent nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"float", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C++ TBAA"}
Index: test/Transforms/SLPVectorizer/X86/consecutive-access.ll
===================================================================
--- test/Transforms/SLPVectorizer/X86/consecutive-access.ll
+++ test/Transforms/SLPVectorizer/X86/consecutive-access.ll
@@ -168,6 +168,168 @@
   ret i32 %conv
 }
 
+; Similar to foo_2double but with a non-power-of-2 factor and potential
+; wrapping (both indices wrap or both don't at the same time)
+; CHECK-LABEL: foo_2double_non_power_of_2
+; CHECK: load <2 x double>
+; CHECK: load <2 x double>
+; Function Attrs: nounwind ssp uwtable
+define void @foo_2double_non_power_of_2(i32 %u) #0 {
+entry:
+  %u.addr = alloca i32, align 4
+  store i32 %u, i32* %u.addr, align 4
+  %mul = mul i32 %u, 6
+  %add6 = add i32 %mul, 6
+  %idxprom = sext i32 %add6 to i64
+  %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
+  %0 = load double, double* %arrayidx, align 8
+  %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
+  %1 = load double, double* %arrayidx4, align 8
+  %add5 = fadd double %0, %1
+  store double %add5, double* %arrayidx, align 8
+  %add7 = add i32 %mul, 7
+  %idxprom12 = sext i32 %add7 to i64
+  %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
+  %2 = load double, double* %arrayidx13, align 8
+  %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
+  %3 = load double, double* %arrayidx17, align 8
+  %add18 = fadd double %2, %3
+  store double %add18, double* %arrayidx13, align 8
+  ret void
+}
+
+; Similar to foo_2double_non_power_of_2 but with zext's instead of sext's
+; CHECK-LABEL: foo_2double_non_power_of_2_zext
+; CHECK: load <2 x double>
+; CHECK: load <2 x double>
+; Function Attrs: nounwind ssp uwtable
+define void @foo_2double_non_power_of_2_zext(i32 %u) #0 {
+entry:
+  %u.addr = alloca i32, align 4
+  store i32 %u, i32* %u.addr, align 4
+  %mul = mul i32 %u, 6
+  %add6 = add i32 %mul, 6
+  %idxprom = zext i32 %add6 to i64
+  %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
+  %0 = load double, double* %arrayidx, align 8
+  %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
+  %1 = load double, double* %arrayidx4, align 8
+  %add5 = fadd double %0, %1
+  store double %add5, double* %arrayidx, align 8
+  %add7 = add i32 %mul, 7
+  %idxprom12 = zext i32 %add7 to i64
+  %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
+  %2 = load double, double* %arrayidx13, align 8
+  %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
+  %3 = load double, double* %arrayidx17, align 8
+  %add18 = fadd double %2, %3
+  store double %add18, double* %arrayidx13, align 8
+  ret void
+}
+
+; Similar to foo_2double_non_power_of_2, but now we are dealing with AddRec SCEV.
+; Alternatively, this is like foo_loop, but with a non-power-of-2 factor and
+; potential wrapping (both indices wrap or both don't at the same time)
+; CHECK-LABEL: foo_loop_non_power_of_2
+; CHECK: <2 x double>
+; Function Attrs: nounwind ssp uwtable
+define i32 @foo_loop_non_power_of_2(double* %A, i32 %n) #0 {
+entry:
+  %A.addr = alloca double*, align 8
+  %n.addr = alloca i32, align 4
+  %sum = alloca double, align 8
+  %i = alloca i32, align 4
+  store double* %A, double** %A.addr, align 8
+  store i32 %n, i32* %n.addr, align 4
+  store double 0.000000e+00, double* %sum, align 8
+  store i32 0, i32* %i, align 4
+  %cmp1 = icmp slt i32 0, %n
+  br i1 %cmp1, label %for.body.lr.ph, label %for.end
+
+for.body.lr.ph:                                   ; preds = %entry
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.lr.ph, %for.body
+  %0 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
+  %1 = phi double [ 0.000000e+00, %for.body.lr.ph ], [ %add7, %for.body ]
+  %mul = mul i32 %0, 12
+  %add.5 = add i32 %mul, 5
+  %idxprom = sext i32 %add.5 to i64
+  %arrayidx = getelementptr inbounds double, double* %A, i64 %idxprom
+  %2 = load double, double* %arrayidx, align 8
+  %mul1 = fmul double 7.000000e+00, %2
+  %add.6 = add i32 %mul, 6
+  %idxprom3 = sext i32 %add.6 to i64
+  %arrayidx4 = getelementptr inbounds double, double* %A, i64 %idxprom3
+  %3 = load double, double* %arrayidx4, align 8
+  %mul5 = fmul double 7.000000e+00, %3
+  %add6 = fadd double %mul1, %mul5
+  %add7 = fadd double %1, %add6
+  store double %add7, double* %sum, align 8
+  %inc = add i32 %0, 1
+  store i32 %inc, i32* %i, align 4
+  %cmp = icmp slt i32 %inc, %n
+  br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
+
+for.cond.for.end_crit_edge:                       ; preds = %for.body
+  %split = phi double [ %add7, %for.body ]
+  br label %for.end
+
+for.end:                                          ; preds = %for.cond.for.end_crit_edge, %entry
+  %.lcssa = phi double [ %split, %for.cond.for.end_crit_edge ], [ 0.000000e+00, %entry ]
+  %conv = fptosi double %.lcssa to i32
+  ret i32 %conv
+}
+
+; This is generated by `clang -std=c11 -Wpedantic -Wall -O3 main.c -S -o - -emit-llvm`
+; with !{!"clang version 7.0.0 (trunk 337339) (llvm/trunk 337344)"} and stripping off
+; the !tbaa metadata nodes to fit the rest of the test file, where `cat main.c` is:
+;
+;   double bar(double *a, unsigned n) {
+;     double x = 0.0;
+;     double y = 0.0;
+;     for (unsigned i = 0; i < n; i += 2) {
+;       x += a[i];
+;       y += a[i + 1];
+;     }
+;     return x * y;
+;   }
+;
+; The resulting IR is similar to @foo_loop, but with zext's instead of sext's.
+;
+; Make sure we are able to vectorize this from now on:
+;
+; CHECK-LABEL: @bar
+; CHECK: load <2 x double>
+define double @bar(double* nocapture readonly %a, i32 %n) local_unnamed_addr #0 {
+entry:
+  %cmp15 = icmp eq i32 %n, 0
+  br i1 %cmp15, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %entry
+  %x.0.lcssa = phi double [ 0.000000e+00, %entry ], [ %add, %for.body ]
+  %y.0.lcssa = phi double [ 0.000000e+00, %entry ], [ %add4, %for.body ]
+  %mul = fmul double %x.0.lcssa, %y.0.lcssa
+  ret double %mul
+
+for.body:                                         ; preds = %entry, %for.body
+  %i.018 = phi i32 [ %add5, %for.body ], [ 0, %entry ]
+  %y.017 = phi double [ %add4, %for.body ], [ 0.000000e+00, %entry ]
+  %x.016 = phi double [ %add, %for.body ], [ 0.000000e+00, %entry ]
+  %idxprom = zext i32 %i.018 to i64
+  %arrayidx = getelementptr inbounds double, double* %a, i64 %idxprom
+  %0 = load double, double* %arrayidx, align 8
+  %add = fadd double %x.016, %0
+  %add1 = or i32 %i.018, 1
+  %idxprom2 = zext i32 %add1 to i64
+  %arrayidx3 = getelementptr inbounds double, double* %a, i64 %idxprom2
+  %1 = load double, double* %arrayidx3, align 8
+  %add4 = fadd double %y.017, %1
+  %add5 = add i32 %i.018, 2
+  %cmp = icmp ult i32 %add5, %n
+  br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
 attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
 !llvm.ident = !{!0}
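For reference, the wrap reasoning behind extractConstantWithoutWrapping can be checked in isolation. The standalone sketch below is not part of the patch: it replays the (5 + (4 * X)) example from the comment table using plain 8-bit integers instead of APInt/SCEV, and the helper name splitLowBits is made up for illustration. It verifies that when D keeps only the low TZ bits of C, the top-level addition D + (C - D + 4 * X) never wraps unsigned, which is what justifies pulling D out of the zext.

// Standalone illustration (not part of the patch): with TZ known trailing
// zeros in the variable part, keeping only the low TZ bits of C in D makes
// the outer addition D + (C - D + 4 * X) carry-free in i8.
#include <cstdint>
#include <cstdio>

// Hypothetical helper mirroring the role of extractConstantWithoutWrapping
// on uint8_t values: keep only the low TZ bits of the constant term C.
static uint8_t splitLowBits(uint8_t C, unsigned TZ) {
  return TZ >= 8 ? C : static_cast<uint8_t>(C & ((1u << TZ) - 1u));
}

int main() {
  const uint8_t C = 5;                   // constant term of (5 + (4 * X))
  const unsigned TZ = 2;                 // 4 * X always has two trailing zero bits
  const uint8_t D = splitLowBits(C, TZ); // D = 1, so C - D = 4
  for (unsigned X = 0; X < 256; ++X) {
    // The residual (C - D + 4 * X) keeps its low TZ bits clear ...
    const uint8_t Residual = static_cast<uint8_t>(C - D + 4 * X);
    // ... so adding D < 2^TZ only fills those zero bits and cannot carry out
    // of the 8-bit range: the top-level addition has no unsigned wrap.
    if (unsigned(Residual) + unsigned(D) > 0xFFu)
      std::printf("unexpected unsigned wrap at X = %u\n", X);
    // The split also preserves the value of the original expression.
    if (static_cast<uint8_t>(Residual + D) != static_cast<uint8_t>(C + 4 * X))
      std::printf("value mismatch at X = %u\n", X);
  }
  std::printf("D = %u, residual constant = %u\n", unsigned(D), unsigned(C - D));
  return 0;
}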