Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
@@ -30475,19 +30475,32 @@
 }
 
 /// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
-/// Promoting a sign extension ahead of an 'add nsw' exposes opportunities
-/// to combine math ops, use an LEA, or use a complex addressing mode. This can
-/// eliminate extend, add, and shift instructions.
-static SDValue promoteSextBeforeAddNSW(SDNode *Sext, SelectionDAG &DAG,
-                                       const X86Subtarget &Subtarget) {
+/// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
+/// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
+/// opportunities to combine math ops, use an LEA, or use a complex addressing
+/// mode. This can eliminate extend, add, and shift instructions.
+static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
+                                   const X86Subtarget &Subtarget) {
+  if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
+      Ext->getOpcode() != ISD::ZERO_EXTEND)
+    return SDValue();
+
   // TODO: This should be valid for other integer types.
-  EVT VT = Sext->getValueType(0);
+  EVT VT = Ext->getValueType(0);
   if (VT != MVT::i64)
     return SDValue();
 
-  // We need an 'add nsw' feeding into the 'sext'.
-  SDValue Add = Sext->getOperand(0);
-  if (Add.getOpcode() != ISD::ADD || !Add->getFlags()->hasNoSignedWrap())
+  SDValue Add = Ext->getOperand(0);
+  if (Add.getOpcode() != ISD::ADD)
+    return SDValue();
+
+  bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
+  bool NSW = Add->getFlags()->hasNoSignedWrap();
+  bool NUW = Add->getFlags()->hasNoUnsignedWrap();
+
+  // We need an 'add nsw' feeding into the 'sext' or 'add nuw' feeding
+  // into the 'zext'
+  if ((Sext && !NSW) || (!Sext && !NUW))
     return SDValue();
 
   // Having a constant operand to the 'add' ensures that we are not increasing
@@ -30503,7 +30516,7 @@
   // of single 'add' instructions, but the cost model for selecting an LEA
   // currently has a high threshold.
   bool HasLEAPotential = false;
-  for (auto *User : Sext->uses()) {
+  for (auto *User : Ext->uses()) {
     if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
       HasLEAPotential = true;
       break;
@@ -30512,17 +30525,18 @@
   if (!HasLEAPotential)
     return SDValue();
 
-  // Everything looks good, so pull the 'sext' ahead of the 'add'.
-  int64_t AddConstant = AddOp1->getSExtValue();
+  // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
+  int64_t AddConstant = Sext ? AddOp1->getSExtValue() : AddOp1->getZExtValue();
   SDValue AddOp0 = Add.getOperand(0);
-  SDValue NewSext = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(Sext), VT, AddOp0);
+  SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
   SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);
 
   // The wider add is guaranteed to not wrap because both operands are
   // sign-extended.
   SDNodeFlags Flags;
-  Flags.setNoSignedWrap(true);
-  return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewSext, NewConstant, &Flags);
+  Flags.setNoSignedWrap(NSW);
+  Flags.setNoUnsignedWrap(NUW);
+  return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, &Flags);
 }
 
 /// (i8,i32 {s/z}ext ({s/u}divrem (i8 x, i8 y)) ->
@@ -30681,7 +30695,7 @@
   if (SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget))
     return R;
 
-  if (SDValue NewAdd = promoteSextBeforeAddNSW(N, DAG, Subtarget))
+  if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
     return NewAdd;
 
   return SDValue();
@@ -30773,6 +30787,9 @@
   if (SDValue DivRem8 = getDivRem8(N, DAG))
     return DivRem8;
 
+  if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
+    return NewAdd;
+
   return SDValue();
 }
 
Index: llvm/trunk/test/CodeGen/X86/add-ext.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/add-ext.ll
+++ llvm/trunk/test/CodeGen/X86/add-ext.ll
@@ -0,0 +1,194 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+
+; The fundamental problem: an add separated from other arithmetic by a sign or
+; zero extension can't be combined with the later instructions. However, if the
+; first add is 'nsw' or 'nuw' respectively, then we can promote the extension
+; ahead of that add to allow optimizations.
+
+define i64 @add_nsw_consts(i32 %i) {
+; CHECK-LABEL: add_nsw_consts:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movslq %edi, %rax
+; CHECK-NEXT:    addq $12, %rax
+; CHECK-NEXT:    retq
+
+  %add = add nsw i32 %i, 5
+  %ext = sext i32 %add to i64
+  %idx = add i64 %ext, 7
+  ret i64 %idx
+}
+
+; An x86 bonus: If we promote the sext ahead of the 'add nsw',
+; we allow LEA formation and eliminate an add instruction.
+
+define i64 @add_nsw_sext_add(i32 %i, i64 %x) {
+; CHECK-LABEL: add_nsw_sext_add:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movslq %edi, %rax
+; CHECK-NEXT:    leaq 5(%rsi,%rax), %rax
+; CHECK-NEXT:    retq
+
+  %add = add nsw i32 %i, 5
+  %ext = sext i32 %add to i64
+  %idx = add i64 %x, %ext
+  ret i64 %idx
+}
+
+; Throw in a scale (left shift) because an LEA can do that too.
+; Use a negative constant (LEA displacement) to verify that's handled correctly.
+
+define i64 @add_nsw_sext_lsh_add(i32 %i, i64 %x) {
+; CHECK-LABEL: add_nsw_sext_lsh_add:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movslq %edi, %rax
+; CHECK-NEXT:    leaq -40(%rsi,%rax,8), %rax
+; CHECK-NEXT:    retq
+
+  %add = add nsw i32 %i, -5
+  %ext = sext i32 %add to i64
+  %shl = shl i64 %ext, 3
+  %idx = add i64 %x, %shl
+  ret i64 %idx
+}
+
+; Don't promote the sext if it has no users. The wider add instruction needs an
+; extra byte to encode.
+
+define i64 @add_nsw_sext(i32 %i, i64 %x) {
+; CHECK-LABEL: add_nsw_sext:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    addl $5, %edi
+; CHECK-NEXT:    movslq %edi, %rax
+; CHECK-NEXT:    retq
+
+  %add = add nsw i32 %i, 5
+  %ext = sext i32 %add to i64
+  ret i64 %ext
+}
+
+; The typical use case: a 64-bit system where an 'int' is used as an index into an array.
+
+define i8* @gep8(i32 %i, i8* %x) {
+; CHECK-LABEL: gep8:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movslq %edi, %rax
+; CHECK-NEXT:    leaq 5(%rsi,%rax), %rax
+; CHECK-NEXT:    retq
+
+  %add = add nsw i32 %i, 5
+  %ext = sext i32 %add to i64
+  %idx = getelementptr i8, i8* %x, i64 %ext
+  ret i8* %idx
+}
+
+define i16* @gep16(i32 %i, i16* %x) {
+; CHECK-LABEL: gep16:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movslq %edi, %rax
+; CHECK-NEXT:    leaq -10(%rsi,%rax,2), %rax
+; CHECK-NEXT:    retq
+
+  %add = add nsw i32 %i, -5
+  %ext = sext i32 %add to i64
+  %idx = getelementptr i16, i16* %x, i64 %ext
+  ret i16* %idx
+}
+
+define i32* @gep32(i32 %i, i32* %x) {
+; CHECK-LABEL: gep32:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movslq %edi, %rax
+; CHECK-NEXT:    leaq 20(%rsi,%rax,4), %rax
+; CHECK-NEXT:    retq
+
+  %add = add nsw i32 %i, 5
+  %ext = sext i32 %add to i64
+  %idx = getelementptr i32, i32* %x, i64 %ext
+  ret i32* %idx
+}
+
+define i64* @gep64(i32 %i, i64* %x) {
+; CHECK-LABEL: gep64:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movslq %edi, %rax
+; CHECK-NEXT:    leaq -40(%rsi,%rax,8), %rax
+; CHECK-NEXT:    retq
+
+  %add = add nsw i32 %i, -5
+  %ext = sext i32 %add to i64
+  %idx = getelementptr i64, i64* %x, i64 %ext
+  ret i64* %idx
+}
+
+; LEA can't scale by 16, but the adds can still be combined into an LEA.
+
+define i128* @gep128(i32 %i, i128* %x) {
+; CHECK-LABEL: gep128:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movslq %edi, %rax
+; CHECK-NEXT:    shlq $4, %rax
+; CHECK-NEXT:    leaq 80(%rsi,%rax), %rax
+; CHECK-NEXT:    retq
+
+  %add = add nsw i32 %i, 5
+  %ext = sext i32 %add to i64
+  %idx = getelementptr i128, i128* %x, i64 %ext
+  ret i128* %idx
+}
+
+; A bigger win can be achieved when there is more than one use of the
+; sign extended value. In this case, we can eliminate sign extension
+; instructions plus use more efficient addressing modes for memory ops.
+
+define void @PR20134(i32* %a, i32 %i) {
+; CHECK-LABEL: PR20134:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movslq %esi, %rax
+; CHECK-NEXT:    movl 4(%rdi,%rax,4), %ecx
+; CHECK-NEXT:    addl 8(%rdi,%rax,4), %ecx
+; CHECK-NEXT:    movl %ecx, (%rdi,%rax,4)
+; CHECK-NEXT:    retq
+
+  %add1 = add nsw i32 %i, 1
+  %idx1 = sext i32 %add1 to i64
+  %gep1 = getelementptr i32, i32* %a, i64 %idx1
+  %load1 = load i32, i32* %gep1, align 4
+
+  %add2 = add nsw i32 %i, 2
+  %idx2 = sext i32 %add2 to i64
+  %gep2 = getelementptr i32, i32* %a, i64 %idx2
+  %load2 = load i32, i32* %gep2, align 4
+
+  %add3 = add i32 %load1, %load2
+  %idx3 = sext i32 %i to i64
+  %gep3 = getelementptr i32, i32* %a, i64 %idx3
+  store i32 %add3, i32* %gep3, align 4
+  ret void
+}
+
+; The same as @PR20134 but sign extension is replaced with zero extension
+define void @PR20134_zext(i32* %a, i32 %i) {
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    movl 4(%rdi,%rax,4), %ecx
+; CHECK-NEXT:    addl 8(%rdi,%rax,4), %ecx
+; CHECK-NEXT:    movl %ecx, (%rdi,%rax,4)
+; CHECK-NEXT:    retq
+
+  %add1 = add nuw i32 %i, 1
+  %idx1 = zext i32 %add1 to i64
+  %gep1 = getelementptr i32, i32* %a, i64 %idx1
+  %load1 = load i32, i32* %gep1, align 4
+
+  %add2 = add nuw i32 %i, 2
+  %idx2 = zext i32 %add2 to i64
+  %gep2 = getelementptr i32, i32* %a, i64 %idx2
+  %load2 = load i32, i32* %gep2, align 4
+
+  %add3 = add i32 %load1, %load2
+  %idx3 = zext i32 %i to i64
+  %gep3 = getelementptr i32, i32* %a, i64 %idx3
+  store i32 %add3, i32* %gep3, align 4
+  ret void
+}
Index: llvm/trunk/test/CodeGen/X86/add-nsw-sext.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/add-nsw-sext.ll
+++ llvm/trunk/test/CodeGen/X86/add-nsw-sext.ll
@@ -1,168 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
-
-; The fundamental problem: an add separated from other arithmetic by a sext can't
-; be combined with the later instructions. However, if the first add is 'nsw',
-; then we can promote the sext ahead of that add to allow optimizations.
-
-define i64 @add_nsw_consts(i32 %i) {
-; CHECK-LABEL: add_nsw_consts:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    movslq %edi, %rax
-; CHECK-NEXT:    addq $12, %rax
-; CHECK-NEXT:    retq
-
-  %add = add nsw i32 %i, 5
-  %ext = sext i32 %add to i64
-  %idx = add i64 %ext, 7
-  ret i64 %idx
-}
-
-; An x86 bonus: If we promote the sext ahead of the 'add nsw',
-; we allow LEA formation and eliminate an add instruction.
-
-define i64 @add_nsw_sext_add(i32 %i, i64 %x) {
-; CHECK-LABEL: add_nsw_sext_add:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    movslq %edi, %rax
-; CHECK-NEXT:    leaq 5(%rsi,%rax), %rax
-; CHECK-NEXT:    retq
-
-  %add = add nsw i32 %i, 5
-  %ext = sext i32 %add to i64
-  %idx = add i64 %x, %ext
-  ret i64 %idx
-}
-
-; Throw in a scale (left shift) because an LEA can do that too.
-; Use a negative constant (LEA displacement) to verify that's handled correctly.
-
-define i64 @add_nsw_sext_lsh_add(i32 %i, i64 %x) {
-; CHECK-LABEL: add_nsw_sext_lsh_add:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    movslq %edi, %rax
-; CHECK-NEXT:    leaq -40(%rsi,%rax,8), %rax
-; CHECK-NEXT:    retq
-
-  %add = add nsw i32 %i, -5
-  %ext = sext i32 %add to i64
-  %shl = shl i64 %ext, 3
-  %idx = add i64 %x, %shl
-  ret i64 %idx
-}
-
-; Don't promote the sext if it has no users. The wider add instruction needs an
-; extra byte to encode.
-
-define i64 @add_nsw_sext(i32 %i, i64 %x) {
-; CHECK-LABEL: add_nsw_sext:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    addl $5, %edi
-; CHECK-NEXT:    movslq %edi, %rax
-; CHECK-NEXT:    retq
-
-  %add = add nsw i32 %i, 5
-  %ext = sext i32 %add to i64
-  ret i64 %ext
-}
-
-; The typical use case: a 64-bit system where an 'int' is used as an index into an array.
-
-define i8* @gep8(i32 %i, i8* %x) {
-; CHECK-LABEL: gep8:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    movslq %edi, %rax
-; CHECK-NEXT:    leaq 5(%rsi,%rax), %rax
-; CHECK-NEXT:    retq
-
-  %add = add nsw i32 %i, 5
-  %ext = sext i32 %add to i64
-  %idx = getelementptr i8, i8* %x, i64 %ext
-  ret i8* %idx
-}
-
-define i16* @gep16(i32 %i, i16* %x) {
-; CHECK-LABEL: gep16:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    movslq %edi, %rax
-; CHECK-NEXT:    leaq -10(%rsi,%rax,2), %rax
-; CHECK-NEXT:    retq
-
-  %add = add nsw i32 %i, -5
-  %ext = sext i32 %add to i64
-  %idx = getelementptr i16, i16* %x, i64 %ext
-  ret i16* %idx
-}
-
-define i32* @gep32(i32 %i, i32* %x) {
-; CHECK-LABEL: gep32:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    movslq %edi, %rax
-; CHECK-NEXT:    leaq 20(%rsi,%rax,4), %rax
-; CHECK-NEXT:    retq
-
-  %add = add nsw i32 %i, 5
-  %ext = sext i32 %add to i64
-  %idx = getelementptr i32, i32* %x, i64 %ext
-  ret i32* %idx
-}
-
-define i64* @gep64(i32 %i, i64* %x) {
-; CHECK-LABEL: gep64:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    movslq %edi, %rax
-; CHECK-NEXT:    leaq -40(%rsi,%rax,8), %rax
-; CHECK-NEXT:    retq
-
-  %add = add nsw i32 %i, -5
-  %ext = sext i32 %add to i64
-  %idx = getelementptr i64, i64* %x, i64 %ext
-  ret i64* %idx
-}
-
-; LEA can't scale by 16, but the adds can still be combined into an LEA.
-
-define i128* @gep128(i32 %i, i128* %x) {
-; CHECK-LABEL: gep128:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    movslq %edi, %rax
-; CHECK-NEXT:    shlq $4, %rax
-; CHECK-NEXT:    leaq 80(%rsi,%rax), %rax
-; CHECK-NEXT:    retq
-
-  %add = add nsw i32 %i, 5
-  %ext = sext i32 %add to i64
-  %idx = getelementptr i128, i128* %x, i64 %ext
-  ret i128* %idx
-}
-
-; A bigger win can be achieved when there is more than one use of the
-; sign extended value. In this case, we can eliminate sign extension
-; instructions plus use more efficient addressing modes for memory ops.
-
-define void @PR20134(i32* %a, i32 %i) {
-; CHECK-LABEL: PR20134:
-; CHECK:       # BB#0:
-; CHECK-NEXT:    movslq %esi, %rax
-; CHECK-NEXT:    movl 4(%rdi,%rax,4), %ecx
-; CHECK-NEXT:    addl 8(%rdi,%rax,4), %ecx
-; CHECK-NEXT:    movl %ecx, (%rdi,%rax,4)
-; CHECK-NEXT:    retq
-
-  %add1 = add nsw i32 %i, 1
-  %idx1 = sext i32 %add1 to i64
-  %gep1 = getelementptr i32, i32* %a, i64 %idx1
-  %load1 = load i32, i32* %gep1, align 4
-
-  %add2 = add nsw i32 %i, 2
-  %idx2 = sext i32 %add2 to i64
-  %gep2 = getelementptr i32, i32* %a, i64 %idx2
-  %load2 = load i32, i32* %gep2, align 4
-
-  %add3 = add i32 %load1, %load2
-  %idx3 = sext i32 %i to i64
-  %gep3 = getelementptr i32, i32* %a, i64 %idx3
-  store i32 %add3, i32* %gep3, align 4
-  ret void
-}
-
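A rough source-level illustration of what the new combine buys (this example is not part of the patch; it is a sketch that assumes the front end marks signed 'int' adds as 'nsw', as Clang normally does): the @PR20134 test above corresponds to C++ like the following, where promoting the sign extension past the 'i + 1' and 'i + 2' adds lets the x86-64 backend fold the offsets into scaled addressing modes instead of emitting separate extends and adds.

#include <cstdio>

// Mirrors the pattern exercised by @PR20134: with the combine, codegen at -O2
// needs a single movslq of 'i' and can use the constant displacements 4 and 8
// directly in the load/store addressing modes (see the CHECK lines above).
void pr20134(int *a, int i) {
  a[i] = a[i + 1] + a[i + 2];
}

// Hypothetical driver, only to make the sketch runnable.
int main() {
  int buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  pr20134(buf, 2);
  std::printf("%d\n", buf[2]); // prints 7 (3 + 4)
  return 0;
}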