diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -3390,6 +3390,7 @@
     return false;
 
   SDValue NBits;
+  bool NegateNBits;
 
   // If we have BMI2's BZHI, we are ok with multi-use patterns.
   // Else, if we only have BMI1's BEXTR, we require one-use.
@@ -3412,8 +3413,8 @@
   };
 
   // a) x & ((1 << nbits) + (-1))
-  auto matchPatternA = [checkOneUse, peekThroughOneUseTruncation,
-                        &NBits](SDValue Mask) -> bool {
+  auto matchPatternA = [checkOneUse, peekThroughOneUseTruncation, &NBits,
+                        &NegateNBits](SDValue Mask) -> bool {
     // Match `add`. Must only have one use!
     if (Mask->getOpcode() != ISD::ADD || !checkOneUse(Mask))
       return false;
@@ -3427,6 +3428,7 @@
     if (!isOneConstant(M0->getOperand(0)))
       return false;
     NBits = M0->getOperand(1);
+    NegateNBits = false;
     return true;
   };
 
@@ -3439,7 +3441,7 @@
 
   // b) x & ~(-1 << nbits)
   auto matchPatternB = [checkOneUse, isAllOnes, peekThroughOneUseTruncation,
-                        &NBits](SDValue Mask) -> bool {
+                        &NBits, &NegateNBits](SDValue Mask) -> bool {
     // Match `~()`. Must only have one use!
     if (Mask.getOpcode() != ISD::XOR || !checkOneUse(Mask))
       return false;
@@ -3454,32 +3456,35 @@
     if (!isAllOnes(M0->getOperand(0)))
       return false;
     NBits = M0->getOperand(1);
+    NegateNBits = false;
     return true;
   };
 
-  // Match potentially-truncated (bitwidth - y)
-  auto matchShiftAmt = [checkOneUse, &NBits](SDValue ShiftAmt,
-                                             unsigned Bitwidth) {
-    // Skip over a truncate of the shift amount.
-    if (ShiftAmt.getOpcode() == ISD::TRUNCATE) {
-      ShiftAmt = ShiftAmt.getOperand(0);
-      // The trunc should have been the only user of the real shift amount.
-      if (!checkOneUse(ShiftAmt))
-        return false;
-    }
-    // Match the shift amount as: (bitwidth - y). It should go away, too.
-    if (ShiftAmt.getOpcode() != ISD::SUB)
-      return false;
-    auto *V0 = dyn_cast<ConstantSDNode>(ShiftAmt.getOperand(0));
+  // Try to match potentially-truncated shift amount as `(bitwidth - y)`,
+  // or leave the shift amount as-is, but then we'll have to negate it.
+  auto canonicalizeShiftAmt = [&NBits, &NegateNBits](SDValue ShiftAmt,
+                                                     unsigned Bitwidth) {
+    NBits = ShiftAmt;
+    NegateNBits = true;
+    // Skip over a truncate of the shift amount, if any.
+    if (NBits.getOpcode() == ISD::TRUNCATE)
+      NBits = NBits.getOperand(0);
+    // Try to match the shift amount as (bitwidth - y). It should go away, too.
+    // If it doesn't match, that's fine, we'll just negate it ourselves.
+    if (NBits.getOpcode() != ISD::SUB)
+      return;
+    auto *V0 = dyn_cast<ConstantSDNode>(NBits.getOperand(0));
     if (!V0 || V0->getZExtValue() != Bitwidth)
-      return false;
-    NBits = ShiftAmt.getOperand(1);
-    return true;
+      return;
+    NBits = NBits.getOperand(1);
+    NegateNBits = false;
   };
 
+  // c) x & (-1 >> z)  but then we'll have to subtract z from bitwidth
+  //    or
   // c) x & (-1 >> (32 - y))
   auto matchPatternC = [checkOneUse, peekThroughOneUseTruncation,
-                        matchShiftAmt](SDValue Mask) -> bool {
+                        canonicalizeShiftAmt](SDValue Mask) -> bool {
     // The mask itself may be truncated.
     Mask = peekThroughOneUseTruncation(Mask);
     unsigned Bitwidth = Mask.getSimpleValueType().getSizeInBits();
@@ -3493,13 +3498,16 @@
     // The shift amount should not be used externally.
     if (!checkOneUse(M1))
       return false;
-    return matchShiftAmt(M1, Bitwidth);
+    canonicalizeShiftAmt(M1, Bitwidth);
+    return true;
   };
 
   SDValue X;
 
+  // d) x << z >> z  but then we'll have to subtract z from bitwidth
+  //    or
   // d) x << (32 - y) >> (32 - y)
-  auto matchPatternD = [checkOneUse, checkTwoUse, matchShiftAmt,
+  auto matchPatternD = [checkOneUse, checkTwoUse, canonicalizeShiftAmt,
                         &X](SDNode *Node) -> bool {
     if (Node->getOpcode() != ISD::SRL)
       return false;
@@ -3513,8 +3521,7 @@
     // There should not be any uses of the shift amount outside of the pattern.
     if (N1 != N01 || !checkTwoUse(N1))
       return false;
-    if (!matchShiftAmt(N1, Bitwidth))
-      return false;
+    canonicalizeShiftAmt(N1, Bitwidth);
     X = N0->getOperand(0);
     return true;
   };
@@ -3538,6 +3545,11 @@
   } else if (!matchPatternD(Node))
     return false;
 
+  // If we need to negate the shift amount, require BMI2 BZHI support.
+  // It's just too unprofitable for BMI1 BEXTR.
+  if (NegateNBits && !Subtarget->hasBMI2())
+    return false;
+
   SDLoc DL(Node);
 
   // Truncate the shift amount.
@@ -3552,11 +3564,22 @@
   SDValue SRIdxVal = CurDAG->getTargetConstant(X86::sub_8bit, DL, MVT::i32);
   insertDAGNode(*CurDAG, SDValue(Node, 0), SRIdxVal);
 
-  NBits = SDValue(
-      CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::i32, ImplDef,
-                             NBits, SRIdxVal), 0);
+  NBits = SDValue(CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
+                                         MVT::i32, ImplDef, NBits, SRIdxVal),
+                  0);
   insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
 
+  // We might have matched the amount of high bits to be cleared,
+  // but we want the amount of low bits to be kept, so negate it then.
+  if (NegateNBits) {
+    SDValue BitWidthC =
+        CurDAG->getConstant(NVT.getScalarSizeInBits(), DL, MVT::i32);
+    insertDAGNode(*CurDAG, SDValue(Node, 0), BitWidthC);
+
+    NBits = CurDAG->getNode(ISD::SUB, DL, MVT::i32, BitWidthC, NBits);
+    insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
+  }
+
   if (Subtarget->hasBMI2()) {
     // Great, just emit the BZHI..
     if (NVT != MVT::i32) {
diff --git a/llvm/test/CodeGen/X86/clear-highbits.ll b/llvm/test/CodeGen/X86/clear-highbits.ll
--- a/llvm/test/CodeGen/X86/clear-highbits.ll
+++ b/llvm/test/CodeGen/X86/clear-highbits.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=X86,X86-NOBMI2,X86-FALLBACK0
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+cmov,+bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=X86,X86-NOBMI2,X86-FALLBACK1
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+cmov,+bmi,+tbm,-bmi2 < %s | FileCheck %s --check-prefixes=X86,X86-NOBMI2,X86-FALLBACK2
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=X86,X86-NOBMI2,X86-BASELINE
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+cmov,+bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=X86,X86-NOBMI2,X86-BMI1
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+cmov,+bmi,+tbm,-bmi2 < %s | FileCheck %s --check-prefixes=X86,X86-NOBMI2,X86-BMI1
 ; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+cmov,+bmi,+tbm,+bmi2 < %s | FileCheck %s --check-prefixes=X86,X86-BMI2
 ; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+cmov,+bmi,-tbm,+bmi2 < %s | FileCheck %s --check-prefixes=X86,X86-BMI2
 ; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=X64,X64-NOBMI2
@@ -335,8 +335,9 @@
 ; X86-BMI2-LABEL: clear_highbits32_c0:
 ; X86-BMI2:       # %bb.0:
 ; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-BMI2-NEXT:    shlxl %eax, {{[0-9]+}}(%esp), %ecx
-; X86-BMI2-NEXT:    shrxl %eax, %ecx, %eax
+; X86-BMI2-NEXT:    movl $32, %ecx
+; X86-BMI2-NEXT:    subl %eax, %ecx
+; X86-BMI2-NEXT:    bzhil %ecx, {{[0-9]+}}(%esp), %eax
 ; X86-BMI2-NEXT:    retl
 ;
 ; X64-NOBMI2-LABEL: clear_highbits32_c0:
@@ -350,8 +351,9 @@
 ;
 ; X64-BMI2-LABEL: clear_highbits32_c0:
 ; X64-BMI2:       # %bb.0:
-; X64-BMI2-NEXT:    shlxl %esi, %edi, %eax
-; X64-BMI2-NEXT:    shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT:    movl $32, %eax
+; X64-BMI2-NEXT:    subl %esi, %eax
+; X64-BMI2-NEXT:    bzhil %eax, %edi, %eax
 ; X64-BMI2-NEXT:    retq
   %mask = lshr i32 -1, %numhighbits
   %masked = and i32 %mask, %val
@@ -370,8 +372,9 @@
 ; X86-BMI2-LABEL: clear_highbits32_c1_indexzext:
 ; X86-BMI2:       # %bb.0:
 ; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-BMI2-NEXT:    shlxl %eax, {{[0-9]+}}(%esp), %ecx
-; X86-BMI2-NEXT:    shrxl %eax, %ecx, %eax
+; X86-BMI2-NEXT:    movl $32, %ecx
+; X86-BMI2-NEXT:    subl %eax, %ecx
+; X86-BMI2-NEXT:    bzhil %ecx, {{[0-9]+}}(%esp), %eax
 ; X86-BMI2-NEXT:    retl
 ;
 ; X64-NOBMI2-LABEL: clear_highbits32_c1_indexzext:
@@ -385,8 +388,9 @@
 ;
 ; X64-BMI2-LABEL: clear_highbits32_c1_indexzext:
 ; X64-BMI2:       # %bb.0:
-; X64-BMI2-NEXT:    shlxl %esi, %edi, %eax
-; X64-BMI2-NEXT:    shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT:    movl $32, %eax
+; X64-BMI2-NEXT:    subl %esi, %eax
+; X64-BMI2-NEXT:    bzhil %eax, %edi, %eax
 ; X64-BMI2-NEXT:    retq
   %sh_prom = zext i8 %numhighbits to i32
   %mask = lshr i32 -1, %sh_prom
@@ -408,8 +412,9 @@
 ; X86-BMI2:       # %bb.0:
 ; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-BMI2-NEXT:    shlxl %ecx, (%eax), %eax
-; X86-BMI2-NEXT:    shrxl %ecx, %eax, %eax
+; X86-BMI2-NEXT:    movl $32, %edx
+; X86-BMI2-NEXT:    subl %ecx, %edx
+; X86-BMI2-NEXT:    bzhil %edx, (%eax), %eax
 ; X86-BMI2-NEXT:    retl
 ;
 ; X64-NOBMI2-LABEL: clear_highbits32_c2_load:
@@ -423,8 +428,9 @@
 ;
 ; X64-BMI2-LABEL: clear_highbits32_c2_load:
 ; X64-BMI2:       # %bb.0:
-; X64-BMI2-NEXT:    shlxl %esi, (%rdi), %eax
-; X64-BMI2-NEXT:    shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT:    movl $32, %eax
+; X64-BMI2-NEXT:    subl %esi, %eax
+; X64-BMI2-NEXT:    bzhil %eax, (%rdi), %eax
 ; X64-BMI2-NEXT:    retq
   %val = load i32, i32* %w
   %mask = lshr i32 -1, %numhighbits
@@ -446,8 +452,9 @@
 ; X86-BMI2:       # %bb.0:
 ; X86-BMI2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-BMI2-NEXT:    shlxl %ecx, (%eax), %eax
-; X86-BMI2-NEXT:    shrxl %ecx, %eax, %eax
+; X86-BMI2-NEXT:    movl $32, %edx
+; X86-BMI2-NEXT:    subl %ecx, %edx
+; X86-BMI2-NEXT:    bzhil %edx, (%eax), %eax
 ; X86-BMI2-NEXT:    retl
 ;
 ; X64-NOBMI2-LABEL: clear_highbits32_c3_load_indexzext:
@@ -461,8 +468,9 @@
 ;
 ; X64-BMI2-LABEL: clear_highbits32_c3_load_indexzext:
 ; X64-BMI2:       # %bb.0:
-; X64-BMI2-NEXT:    shlxl %esi, (%rdi), %eax
-; X64-BMI2-NEXT:    shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT:    movl $32, %eax
+; X64-BMI2-NEXT:    subl %esi, %eax
+; X64-BMI2-NEXT:    bzhil %eax, (%rdi), %eax
 ; X64-BMI2-NEXT:    retq
   %val = load i32, i32* %w
   %sh_prom = zext i8 %numhighbits to i32
@@ -483,8 +491,9 @@
 ; X86-BMI2-LABEL: clear_highbits32_c4_commutative:
 ; X86-BMI2:       # %bb.0:
 ; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-BMI2-NEXT:    shlxl %eax, {{[0-9]+}}(%esp), %ecx
-; X86-BMI2-NEXT:    shrxl %eax, %ecx, %eax
+; X86-BMI2-NEXT:    movl $32, %ecx
+; X86-BMI2-NEXT:    subl %eax, %ecx
+; X86-BMI2-NEXT:    bzhil %ecx, {{[0-9]+}}(%esp), %eax
 ; X86-BMI2-NEXT:    retl
 ;
 ; X64-NOBMI2-LABEL: clear_highbits32_c4_commutative:
@@ -498,8 +507,9 @@
 ;
 ; X64-BMI2-LABEL: clear_highbits32_c4_commutative:
 ; X64-BMI2:       # %bb.0:
-; X64-BMI2-NEXT:    shlxl %esi, %edi, %eax
-; X64-BMI2-NEXT:    shrxl %esi, %eax, %eax
+; X64-BMI2-NEXT:    movl $32, %eax
+; X64-BMI2-NEXT:    subl %esi, %eax
+; X64-BMI2-NEXT:    bzhil %eax, %edi, %eax
 ; X64-BMI2-NEXT:    retq
   %mask = lshr i32 -1, %numhighbits
   %masked = and i32 %val, %mask ; swapped order
@@ -511,58 +521,42 @@
 ; ---------------------------------------------------------------------------- ;
 
 define i64 @clear_highbits64_c0(i64 %val, i64 %numhighbits) nounwind {
-; X86-FALLBACK0-LABEL: clear_highbits64_c0:
-; X86-FALLBACK0:       # %bb.0:
-; X86-FALLBACK0-NEXT:    pushl %esi
-; X86-FALLBACK0-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-FALLBACK0-NEXT:    movl $-1, %eax
-; X86-FALLBACK0-NEXT:    movl $-1, %esi
-; X86-FALLBACK0-NEXT:    shrl %cl, %esi
-; X86-FALLBACK0-NEXT:    xorl %edx, %edx
-; X86-FALLBACK0-NEXT:    testb $32, %cl
-; X86-FALLBACK0-NEXT:    jne .LBB13_1
-; X86-FALLBACK0-NEXT:  # %bb.2:
-; X86-FALLBACK0-NEXT:    movl %esi, %edx
-; X86-FALLBACK0-NEXT:    jmp .LBB13_3
-; X86-FALLBACK0-NEXT:  .LBB13_1:
-; X86-FALLBACK0-NEXT:    movl %esi, %eax
-; X86-FALLBACK0-NEXT:  .LBB13_3:
-; X86-FALLBACK0-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-FALLBACK0-NEXT:    andl {{[0-9]+}}(%esp), %edx
-; X86-FALLBACK0-NEXT:    popl %esi
-; X86-FALLBACK0-NEXT:    retl
+; X86-BASELINE-LABEL: clear_highbits64_c0:
+; X86-BASELINE:       # %bb.0:
+; X86-BASELINE-NEXT:    pushl %esi
+; X86-BASELINE-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BASELINE-NEXT:    movl $-1, %eax
+; X86-BASELINE-NEXT:    movl $-1, %esi
+; X86-BASELINE-NEXT:    shrl %cl, %esi
+; X86-BASELINE-NEXT:    xorl %edx, %edx
+; X86-BASELINE-NEXT:    testb $32, %cl
+; X86-BASELINE-NEXT:    jne .LBB13_1
+; X86-BASELINE-NEXT:  # %bb.2:
+; X86-BASELINE-NEXT:    movl %esi, %edx
+; X86-BASELINE-NEXT:    jmp .LBB13_3
+; X86-BASELINE-NEXT:  .LBB13_1:
+; X86-BASELINE-NEXT:    movl %esi, %eax
+; X86-BASELINE-NEXT:  .LBB13_3:
+; X86-BASELINE-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-BASELINE-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-BASELINE-NEXT:    popl %esi
+; X86-BASELINE-NEXT:    retl
 ;
-; X86-FALLBACK1-LABEL: clear_highbits64_c0:
-; X86-FALLBACK1:       # %bb.0:
-; X86-FALLBACK1-NEXT:    pushl %esi
-; X86-FALLBACK1-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-FALLBACK1-NEXT:    movl $-1, %esi
-; X86-FALLBACK1-NEXT:    movl $-1, %eax
-; X86-FALLBACK1-NEXT:    shrl %cl, %eax
-; X86-FALLBACK1-NEXT:    xorl %edx, %edx
-; X86-FALLBACK1-NEXT:    testb $32, %cl
-; X86-FALLBACK1-NEXT:    cmovel %eax, %edx
-; X86-FALLBACK1-NEXT:    cmovel %esi, %eax
-; X86-FALLBACK1-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-FALLBACK1-NEXT:    andl {{[0-9]+}}(%esp), %edx
-; X86-FALLBACK1-NEXT:    popl %esi
-; X86-FALLBACK1-NEXT:    retl
-;
-; X86-FALLBACK2-LABEL: clear_highbits64_c0:
-; X86-FALLBACK2:       # %bb.0:
-; X86-FALLBACK2-NEXT:    pushl %esi
-; X86-FALLBACK2-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-FALLBACK2-NEXT:    movl $-1, %esi
-; X86-FALLBACK2-NEXT:    movl $-1, %eax
-; X86-FALLBACK2-NEXT:    shrl %cl, %eax
-; X86-FALLBACK2-NEXT:    xorl %edx, %edx
-; X86-FALLBACK2-NEXT:    testb $32, %cl
-; X86-FALLBACK2-NEXT:    cmovel %eax, %edx
-; X86-FALLBACK2-NEXT:    cmovel %esi, %eax
-; X86-FALLBACK2-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-FALLBACK2-NEXT:    andl {{[0-9]+}}(%esp), %edx
-; X86-FALLBACK2-NEXT:    popl %esi
-; X86-FALLBACK2-NEXT:    retl
+; X86-BMI1-LABEL: clear_highbits64_c0:
+; X86-BMI1:       # %bb.0:
+; X86-BMI1-NEXT:    pushl %esi
+; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1-NEXT:    movl $-1, %esi
+; X86-BMI1-NEXT:    movl $-1, %eax
+; X86-BMI1-NEXT:    shrl %cl, %eax
+; X86-BMI1-NEXT:    xorl %edx, %edx
+; X86-BMI1-NEXT:    testb $32, %cl
+; X86-BMI1-NEXT:    cmovel %eax, %edx
+; X86-BMI1-NEXT:    cmovel %esi, %eax
+; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-BMI1-NEXT:    popl %esi
+; X86-BMI1-NEXT:    retl
 ;
 ; X86-BMI2-LABEL: clear_highbits64_c0:
 ; X86-BMI2:       # %bb.0:
@@ -590,8 +584,9 @@
 ;
 ; X64-BMI2-LABEL: clear_highbits64_c0:
 ; X64-BMI2:       # %bb.0:
-; X64-BMI2-NEXT:    shlxq %rsi, %rdi, %rax
-; X64-BMI2-NEXT:    shrxq %rsi, %rax, %rax
+; X64-BMI2-NEXT:    movl $64, %eax
+; X64-BMI2-NEXT:    subl %esi, %eax
+; X64-BMI2-NEXT:    bzhiq %rax, %rdi, %rax
 ; X64-BMI2-NEXT:    retq
   %mask = lshr i64 -1, %numhighbits
   %masked = and i64 %mask, %val
@@ -599,58 +594,42 @@
 }
 
 define i64 @clear_highbits64_c1_indexzext(i64 %val, i8 %numhighbits) nounwind {
-; X86-FALLBACK0-LABEL: clear_highbits64_c1_indexzext:
-; X86-FALLBACK0:       # %bb.0:
-; X86-FALLBACK0-NEXT:    pushl %esi
-; X86-FALLBACK0-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-FALLBACK0-NEXT:    movl $-1, %eax
-; X86-FALLBACK0-NEXT:    movl $-1, %esi
-; X86-FALLBACK0-NEXT:    shrl %cl, %esi
-; X86-FALLBACK0-NEXT:    xorl %edx, %edx
-; X86-FALLBACK0-NEXT:    testb $32, %cl
-; X86-FALLBACK0-NEXT:    jne .LBB14_1
-; X86-FALLBACK0-NEXT:  # %bb.2:
-; X86-FALLBACK0-NEXT:    movl %esi, %edx
-; X86-FALLBACK0-NEXT:    jmp .LBB14_3
-; X86-FALLBACK0-NEXT:  .LBB14_1:
-; X86-FALLBACK0-NEXT:    movl %esi, %eax
-; X86-FALLBACK0-NEXT:  .LBB14_3:
-; X86-FALLBACK0-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-FALLBACK0-NEXT:    andl {{[0-9]+}}(%esp), %edx
-; X86-FALLBACK0-NEXT:    popl %esi
-; X86-FALLBACK0-NEXT:    retl
+; X86-BASELINE-LABEL: clear_highbits64_c1_indexzext:
+; X86-BASELINE:       # %bb.0:
+; X86-BASELINE-NEXT:    pushl %esi
+; X86-BASELINE-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BASELINE-NEXT:    movl $-1, %eax
+; X86-BASELINE-NEXT:    movl $-1, %esi
+; X86-BASELINE-NEXT:    shrl %cl, %esi
+; X86-BASELINE-NEXT:    xorl %edx, %edx
+; X86-BASELINE-NEXT:    testb $32, %cl
+; X86-BASELINE-NEXT:    jne .LBB14_1
+; X86-BASELINE-NEXT:  # %bb.2:
+; X86-BASELINE-NEXT:    movl %esi, %edx
+; X86-BASELINE-NEXT:    jmp .LBB14_3
+; X86-BASELINE-NEXT:  .LBB14_1:
+; X86-BASELINE-NEXT:    movl %esi, %eax
+; X86-BASELINE-NEXT:  .LBB14_3:
+; X86-BASELINE-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-BASELINE-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-BASELINE-NEXT:    popl %esi
+; X86-BASELINE-NEXT:    retl
 ;
-; X86-FALLBACK1-LABEL: clear_highbits64_c1_indexzext:
-; X86-FALLBACK1:       # %bb.0:
-; X86-FALLBACK1-NEXT:    pushl %esi
-; X86-FALLBACK1-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-FALLBACK1-NEXT:    movl $-1, %esi
-; X86-FALLBACK1-NEXT:    movl $-1, %eax
-; X86-FALLBACK1-NEXT:    shrl %cl, %eax
-; X86-FALLBACK1-NEXT:    xorl %edx, %edx
-; X86-FALLBACK1-NEXT:    testb $32, %cl
-; X86-FALLBACK1-NEXT:    cmovel %eax, %edx
-; X86-FALLBACK1-NEXT:    cmovel %esi, %eax
-; X86-FALLBACK1-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-FALLBACK1-NEXT:    andl {{[0-9]+}}(%esp), %edx
-; X86-FALLBACK1-NEXT:    popl %esi
-; X86-FALLBACK1-NEXT:    retl
-;
-; X86-FALLBACK2-LABEL: clear_highbits64_c1_indexzext:
-; X86-FALLBACK2:       # %bb.0:
-; X86-FALLBACK2-NEXT:    pushl %esi
-; X86-FALLBACK2-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-FALLBACK2-NEXT:    movl $-1, %esi
-; X86-FALLBACK2-NEXT:    movl $-1, %eax
-; X86-FALLBACK2-NEXT:    shrl %cl, %eax
-; X86-FALLBACK2-NEXT:    xorl %edx, %edx
-; X86-FALLBACK2-NEXT:    testb $32, %cl
-; X86-FALLBACK2-NEXT:    cmovel %eax, %edx
-; X86-FALLBACK2-NEXT:    cmovel %esi, %eax
-; X86-FALLBACK2-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-FALLBACK2-NEXT:    andl {{[0-9]+}}(%esp), %edx
-; X86-FALLBACK2-NEXT:    popl %esi
-; X86-FALLBACK2-NEXT:    retl
+; X86-BMI1-LABEL: clear_highbits64_c1_indexzext:
+; X86-BMI1:       # %bb.0:
+; X86-BMI1-NEXT:    pushl %esi
+; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1-NEXT:    movl $-1, %esi
+; X86-BMI1-NEXT:    movl $-1, %eax
+; X86-BMI1-NEXT:    shrl %cl, %eax
+; X86-BMI1-NEXT:    xorl %edx, %edx
+; X86-BMI1-NEXT:    testb $32, %cl
+; X86-BMI1-NEXT:    cmovel %eax, %edx
+; X86-BMI1-NEXT:    cmovel %esi, %eax
+; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-BMI1-NEXT:    popl %esi
+; X86-BMI1-NEXT:    retl
 ;
 ; X86-BMI2-LABEL: clear_highbits64_c1_indexzext:
 ; X86-BMI2:       # %bb.0:
@@ -678,9 +657,9 @@
 ;
 ; X64-BMI2-LABEL: clear_highbits64_c1_indexzext:
 ; X64-BMI2:       # %bb.0:
-; X64-BMI2-NEXT:    # kill: def $esi killed $esi def $rsi
-; X64-BMI2-NEXT:    shlxq %rsi, %rdi, %rax
-; X64-BMI2-NEXT:    shrxq %rsi, %rax, %rax
+; X64-BMI2-NEXT:    movl $64, %eax
+; X64-BMI2-NEXT:    subl %esi, %eax
+; X64-BMI2-NEXT:    bzhiq %rax, %rdi, %rax
 ; X64-BMI2-NEXT:    retq
   %sh_prom = zext i8 %numhighbits to i64
   %mask = lshr i64 -1, %sh_prom
@@ -689,67 +668,48 @@
 }
 
 define i64 @clear_highbits64_c2_load(i64* %w, i64 %numhighbits) nounwind {
-; X86-FALLBACK0-LABEL: clear_highbits64_c2_load:
-; X86-FALLBACK0:       # %bb.0:
-; X86-FALLBACK0-NEXT:    pushl %edi
-; X86-FALLBACK0-NEXT:    pushl %esi
-; X86-FALLBACK0-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-FALLBACK0-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-FALLBACK0-NEXT:    movl $-1, %eax
-; X86-FALLBACK0-NEXT:    movl $-1, %edi
-; X86-FALLBACK0-NEXT:    shrl %cl, %edi
-; X86-FALLBACK0-NEXT:    xorl %edx, %edx
-; X86-FALLBACK0-NEXT:    testb $32, %cl
-; X86-FALLBACK0-NEXT:    jne .LBB15_1
-; X86-FALLBACK0-NEXT:  # %bb.2:
-; X86-FALLBACK0-NEXT:    movl %edi, %edx
-; X86-FALLBACK0-NEXT:    jmp .LBB15_3
-; X86-FALLBACK0-NEXT:  .LBB15_1:
-; X86-FALLBACK0-NEXT:    movl %edi, %eax
-; X86-FALLBACK0-NEXT:  .LBB15_3:
-; X86-FALLBACK0-NEXT:    andl (%esi), %eax
-; X86-FALLBACK0-NEXT:    andl 4(%esi), %edx
-; X86-FALLBACK0-NEXT:    popl %esi
-; X86-FALLBACK0-NEXT:    popl %edi
-; X86-FALLBACK0-NEXT:    retl
+; X86-BASELINE-LABEL: clear_highbits64_c2_load:
+; X86-BASELINE:       # %bb.0:
+; X86-BASELINE-NEXT:    pushl %edi
+; X86-BASELINE-NEXT:    pushl %esi
+; X86-BASELINE-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BASELINE-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BASELINE-NEXT:    movl $-1, %eax
+; X86-BASELINE-NEXT:    movl $-1, %edi
+; X86-BASELINE-NEXT:    shrl %cl, %edi
+; X86-BASELINE-NEXT:    xorl %edx, %edx
+; X86-BASELINE-NEXT:    testb $32, %cl
+; X86-BASELINE-NEXT:    jne .LBB15_1
+; X86-BASELINE-NEXT:  # %bb.2:
+; X86-BASELINE-NEXT:    movl %edi, %edx
+; X86-BASELINE-NEXT:    jmp .LBB15_3
+; X86-BASELINE-NEXT:  .LBB15_1:
+; X86-BASELINE-NEXT:    movl %edi, %eax
+; X86-BASELINE-NEXT:  .LBB15_3:
+; X86-BASELINE-NEXT:    andl (%esi), %eax
+; X86-BASELINE-NEXT:    andl 4(%esi), %edx
+; X86-BASELINE-NEXT:    popl %esi
+; X86-BASELINE-NEXT:    popl %edi
+; X86-BASELINE-NEXT:    retl
 ;
-; X86-FALLBACK1-LABEL: clear_highbits64_c2_load:
-; X86-FALLBACK1:       # %bb.0:
-; X86-FALLBACK1-NEXT:    pushl %edi
-; X86-FALLBACK1-NEXT:    pushl %esi
-; X86-FALLBACK1-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-FALLBACK1-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-FALLBACK1-NEXT:    movl $-1, %edi
-; X86-FALLBACK1-NEXT:    movl $-1, %eax
-; X86-FALLBACK1-NEXT:    shrl %cl, %eax
-; X86-FALLBACK1-NEXT:    xorl %edx, %edx
-; X86-FALLBACK1-NEXT:    testb $32, %cl
-; X86-FALLBACK1-NEXT:    cmovel %eax, %edx
-; X86-FALLBACK1-NEXT:    cmovel %edi, %eax
-; X86-FALLBACK1-NEXT:    andl (%esi), %eax
-; X86-FALLBACK1-NEXT:    andl 4(%esi), %edx
-; X86-FALLBACK1-NEXT:    popl %esi
-; X86-FALLBACK1-NEXT:    popl %edi
-; X86-FALLBACK1-NEXT:    retl
-;
-; X86-FALLBACK2-LABEL: clear_highbits64_c2_load:
-; X86-FALLBACK2:       # %bb.0:
-; X86-FALLBACK2-NEXT:    pushl %edi
-; X86-FALLBACK2-NEXT:    pushl %esi
-; X86-FALLBACK2-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-FALLBACK2-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-FALLBACK2-NEXT:    movl $-1, %edi
-; X86-FALLBACK2-NEXT:    movl $-1, %eax
-; X86-FALLBACK2-NEXT:    shrl %cl, %eax
-; X86-FALLBACK2-NEXT:    xorl %edx, %edx
-; X86-FALLBACK2-NEXT:    testb $32, %cl
-; X86-FALLBACK2-NEXT:    cmovel %eax, %edx
-; X86-FALLBACK2-NEXT:    cmovel %edi, %eax
-; X86-FALLBACK2-NEXT:    andl (%esi), %eax
-; X86-FALLBACK2-NEXT:    andl 4(%esi), %edx
-; X86-FALLBACK2-NEXT:    popl %esi
-; X86-FALLBACK2-NEXT:    popl %edi
-; X86-FALLBACK2-NEXT:    retl
+; X86-BMI1-LABEL: clear_highbits64_c2_load:
+; X86-BMI1:       # %bb.0:
+; X86-BMI1-NEXT:    pushl %edi
+; X86-BMI1-NEXT:    pushl %esi
+; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1-NEXT:    movl $-1, %edi
+; X86-BMI1-NEXT:    movl $-1, %eax
+; X86-BMI1-NEXT:    shrl %cl, %eax
+; X86-BMI1-NEXT:    xorl %edx, %edx
+; X86-BMI1-NEXT:    testb $32, %cl
+; X86-BMI1-NEXT:    cmovel %eax, %edx
+; X86-BMI1-NEXT:    cmovel %edi, %eax
+; X86-BMI1-NEXT:    andl (%esi), %eax
+; X86-BMI1-NEXT:    andl 4(%esi), %edx
+; X86-BMI1-NEXT:    popl %esi
+; X86-BMI1-NEXT:    popl %edi
+; X86-BMI1-NEXT:    retl
 ;
 ; X86-BMI2-LABEL: clear_highbits64_c2_load:
 ; X86-BMI2:       # %bb.0:
@@ -780,8 +740,9 @@
 ;
 ; X64-BMI2-LABEL: clear_highbits64_c2_load:
 ; X64-BMI2:       # %bb.0:
-; X64-BMI2-NEXT:    shlxq %rsi, (%rdi), %rax
-; X64-BMI2-NEXT:    shrxq %rsi, %rax, %rax
+; X64-BMI2-NEXT:    movl $64, %eax
+; X64-BMI2-NEXT:    subl %esi, %eax
+; X64-BMI2-NEXT:    bzhiq %rax, (%rdi), %rax
 ; X64-BMI2-NEXT:    retq
   %val = load i64, i64* %w
   %mask = lshr i64 -1, %numhighbits
@@ -790,67 +751,48 @@
 }
 
 define i64 @clear_highbits64_c3_load_indexzext(i64* %w, i8 %numhighbits) nounwind {
-; X86-FALLBACK0-LABEL: clear_highbits64_c3_load_indexzext:
-; X86-FALLBACK0:       # %bb.0:
-; X86-FALLBACK0-NEXT:    pushl %edi
-; X86-FALLBACK0-NEXT:    pushl %esi
-; X86-FALLBACK0-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-FALLBACK0-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-FALLBACK0-NEXT:    movl $-1, %eax
-; X86-FALLBACK0-NEXT:    movl $-1, %edi
-; X86-FALLBACK0-NEXT:    shrl %cl, %edi
-; X86-FALLBACK0-NEXT:    xorl %edx, %edx
-; X86-FALLBACK0-NEXT:    testb $32, %cl
-; X86-FALLBACK0-NEXT:    jne .LBB16_1
-; X86-FALLBACK0-NEXT:  # %bb.2:
-; X86-FALLBACK0-NEXT:    movl %edi, %edx
-; X86-FALLBACK0-NEXT:    jmp .LBB16_3
-; X86-FALLBACK0-NEXT:  .LBB16_1:
-; X86-FALLBACK0-NEXT:    movl %edi, %eax
-; X86-FALLBACK0-NEXT:  .LBB16_3:
-; X86-FALLBACK0-NEXT:    andl (%esi), %eax
-; X86-FALLBACK0-NEXT:    andl 4(%esi), %edx
-; X86-FALLBACK0-NEXT:    popl %esi
-; X86-FALLBACK0-NEXT:    popl %edi
-; X86-FALLBACK0-NEXT:    retl
+; X86-BASELINE-LABEL: clear_highbits64_c3_load_indexzext:
+; X86-BASELINE:       # %bb.0:
+; X86-BASELINE-NEXT:    pushl %edi
+; X86-BASELINE-NEXT:    pushl %esi
+; X86-BASELINE-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BASELINE-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BASELINE-NEXT:    movl $-1, %eax
+; X86-BASELINE-NEXT:    movl $-1, %edi
+; X86-BASELINE-NEXT:    shrl %cl, %edi
+; X86-BASELINE-NEXT:    xorl %edx, %edx
+; X86-BASELINE-NEXT:    testb $32, %cl
+; X86-BASELINE-NEXT:    jne .LBB16_1
+; X86-BASELINE-NEXT:  # %bb.2:
+; X86-BASELINE-NEXT:    movl %edi, %edx
+; X86-BASELINE-NEXT:    jmp .LBB16_3
+; X86-BASELINE-NEXT:  .LBB16_1:
+; X86-BASELINE-NEXT:    movl %edi, %eax
+; X86-BASELINE-NEXT:  .LBB16_3:
+; X86-BASELINE-NEXT:    andl (%esi), %eax
+; X86-BASELINE-NEXT:    andl 4(%esi), %edx
+; X86-BASELINE-NEXT:    popl %esi
+; X86-BASELINE-NEXT:    popl %edi
+; X86-BASELINE-NEXT:    retl
 ;
-; X86-FALLBACK1-LABEL: clear_highbits64_c3_load_indexzext:
-; X86-FALLBACK1:       # %bb.0:
-; X86-FALLBACK1-NEXT:    pushl %edi
-; X86-FALLBACK1-NEXT:    pushl %esi
-; X86-FALLBACK1-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-FALLBACK1-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-FALLBACK1-NEXT:    movl $-1, %edi
-; X86-FALLBACK1-NEXT:    movl $-1, %eax
-; X86-FALLBACK1-NEXT:    shrl %cl, %eax
-; X86-FALLBACK1-NEXT:    xorl %edx, %edx
-; X86-FALLBACK1-NEXT:    testb $32, %cl
-; X86-FALLBACK1-NEXT:    cmovel %eax, %edx
-; X86-FALLBACK1-NEXT:    cmovel %edi, %eax
-; X86-FALLBACK1-NEXT:    andl (%esi), %eax
-; X86-FALLBACK1-NEXT:    andl 4(%esi), %edx
-; X86-FALLBACK1-NEXT:    popl %esi
-; X86-FALLBACK1-NEXT:    popl %edi
-; X86-FALLBACK1-NEXT:    retl
-;
-; X86-FALLBACK2-LABEL: clear_highbits64_c3_load_indexzext:
-; X86-FALLBACK2:       # %bb.0:
-; X86-FALLBACK2-NEXT:    pushl %edi
-; X86-FALLBACK2-NEXT:    pushl %esi
-; X86-FALLBACK2-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-FALLBACK2-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-FALLBACK2-NEXT:    movl $-1, %edi
-; X86-FALLBACK2-NEXT:    movl $-1, %eax
-; X86-FALLBACK2-NEXT:    shrl %cl, %eax
-; X86-FALLBACK2-NEXT:    xorl %edx, %edx
-; X86-FALLBACK2-NEXT:    testb $32, %cl
-; X86-FALLBACK2-NEXT:    cmovel %eax, %edx
-; X86-FALLBACK2-NEXT:    cmovel %edi, %eax
-; X86-FALLBACK2-NEXT:    andl (%esi), %eax
-; X86-FALLBACK2-NEXT:    andl 4(%esi), %edx
-; X86-FALLBACK2-NEXT:    popl %esi
-; X86-FALLBACK2-NEXT:    popl %edi
-; X86-FALLBACK2-NEXT:    retl
+; X86-BMI1-LABEL: clear_highbits64_c3_load_indexzext:
+; X86-BMI1:       # %bb.0:
+; X86-BMI1-NEXT:    pushl %edi
+; X86-BMI1-NEXT:    pushl %esi
+; X86-BMI1-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1-NEXT:    movl $-1, %edi
+; X86-BMI1-NEXT:    movl $-1, %eax
+; X86-BMI1-NEXT:    shrl %cl, %eax
+; X86-BMI1-NEXT:    xorl %edx, %edx
+; X86-BMI1-NEXT:    testb $32, %cl
+; X86-BMI1-NEXT:    cmovel %eax, %edx
+; X86-BMI1-NEXT:    cmovel %edi, %eax
+; X86-BMI1-NEXT:    andl (%esi), %eax
+; X86-BMI1-NEXT:    andl 4(%esi), %edx
+; X86-BMI1-NEXT:    popl %esi
+; X86-BMI1-NEXT:    popl %edi
+; X86-BMI1-NEXT:    retl
 ;
 ; X86-BMI2-LABEL: clear_highbits64_c3_load_indexzext:
 ; X86-BMI2:       # %bb.0:
@@ -881,9 +823,9 @@
 ;
 ; X64-BMI2-LABEL: clear_highbits64_c3_load_indexzext:
 ; X64-BMI2:       # %bb.0:
-; X64-BMI2-NEXT:    # kill: def $esi killed $esi def $rsi
-; X64-BMI2-NEXT:    shlxq %rsi, (%rdi), %rax
-; X64-BMI2-NEXT:    shrxq %rsi, %rax, %rax
+; X64-BMI2-NEXT:    movl $64, %eax
+; X64-BMI2-NEXT:    subl %esi, %eax
+; X64-BMI2-NEXT:    bzhiq %rax, (%rdi), %rax
 ; X64-BMI2-NEXT:    retq
   %val = load i64, i64* %w
   %sh_prom = zext i8 %numhighbits to i64
@@ -893,58 +835,42 @@
 }
 
 define i64 @clear_highbits64_c4_commutative(i64 %val, i64 %numhighbits) nounwind {
-; X86-FALLBACK0-LABEL: clear_highbits64_c4_commutative:
-; X86-FALLBACK0:       # %bb.0:
-; X86-FALLBACK0-NEXT:    pushl %esi
-; X86-FALLBACK0-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-FALLBACK0-NEXT:    movl $-1, %eax
-; X86-FALLBACK0-NEXT:    movl $-1, %esi
-; X86-FALLBACK0-NEXT:    shrl %cl, %esi
-; X86-FALLBACK0-NEXT:    xorl %edx, %edx
-; X86-FALLBACK0-NEXT:    testb $32, %cl
-; X86-FALLBACK0-NEXT:    jne .LBB17_1
-; X86-FALLBACK0-NEXT:  # %bb.2:
-; X86-FALLBACK0-NEXT:    movl %esi, %edx
-; X86-FALLBACK0-NEXT:    jmp .LBB17_3
-; X86-FALLBACK0-NEXT:  .LBB17_1:
-; X86-FALLBACK0-NEXT:    movl %esi, %eax
-; X86-FALLBACK0-NEXT:  .LBB17_3:
-; X86-FALLBACK0-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-FALLBACK0-NEXT:    andl {{[0-9]+}}(%esp), %edx
-; X86-FALLBACK0-NEXT:    popl %esi
-; X86-FALLBACK0-NEXT:    retl
+; X86-BASELINE-LABEL: clear_highbits64_c4_commutative:
+; X86-BASELINE:       # %bb.0:
+; X86-BASELINE-NEXT:    pushl %esi
+; X86-BASELINE-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BASELINE-NEXT:    movl $-1, %eax
+; X86-BASELINE-NEXT:    movl $-1, %esi
+; X86-BASELINE-NEXT:    shrl %cl, %esi
+; X86-BASELINE-NEXT:    xorl %edx, %edx
+; X86-BASELINE-NEXT:    testb $32, %cl
+; X86-BASELINE-NEXT:    jne .LBB17_1
+; X86-BASELINE-NEXT:  # %bb.2:
+; X86-BASELINE-NEXT:    movl %esi, %edx
+; X86-BASELINE-NEXT:    jmp .LBB17_3
+; X86-BASELINE-NEXT:  .LBB17_1:
+; X86-BASELINE-NEXT:    movl %esi, %eax
+; X86-BASELINE-NEXT:  .LBB17_3:
+; X86-BASELINE-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-BASELINE-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-BASELINE-NEXT:    popl %esi
+; X86-BASELINE-NEXT:    retl
 ;
-; X86-FALLBACK1-LABEL: clear_highbits64_c4_commutative:
-; X86-FALLBACK1:       # %bb.0:
-; X86-FALLBACK1-NEXT:    pushl %esi
-; X86-FALLBACK1-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-FALLBACK1-NEXT:    movl $-1, %esi
-; X86-FALLBACK1-NEXT:    movl $-1, %eax
-; X86-FALLBACK1-NEXT:    shrl %cl, %eax
-; X86-FALLBACK1-NEXT:    xorl %edx, %edx
-; X86-FALLBACK1-NEXT:    testb $32, %cl
-; X86-FALLBACK1-NEXT:    cmovel %eax, %edx
-; X86-FALLBACK1-NEXT:    cmovel %esi, %eax
-; X86-FALLBACK1-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-FALLBACK1-NEXT:    andl {{[0-9]+}}(%esp), %edx
-; X86-FALLBACK1-NEXT:    popl %esi
-; X86-FALLBACK1-NEXT:    retl
-;
-; X86-FALLBACK2-LABEL: clear_highbits64_c4_commutative:
-; X86-FALLBACK2:       # %bb.0:
-; X86-FALLBACK2-NEXT:    pushl %esi
-; X86-FALLBACK2-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-FALLBACK2-NEXT:    movl $-1, %esi
-; X86-FALLBACK2-NEXT:    movl $-1, %eax
-; X86-FALLBACK2-NEXT:    shrl %cl, %eax
-; X86-FALLBACK2-NEXT:    xorl %edx, %edx
-; X86-FALLBACK2-NEXT:    testb $32, %cl
-; X86-FALLBACK2-NEXT:    cmovel %eax, %edx
-; X86-FALLBACK2-NEXT:    cmovel %esi, %eax
-; X86-FALLBACK2-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X86-FALLBACK2-NEXT:    andl {{[0-9]+}}(%esp), %edx
-; X86-FALLBACK2-NEXT:    popl %esi
-; X86-FALLBACK2-NEXT:    retl
+; X86-BMI1-LABEL: clear_highbits64_c4_commutative:
+; X86-BMI1:       # %bb.0:
+; X86-BMI1-NEXT:    pushl %esi
+; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1-NEXT:    movl $-1, %esi
+; X86-BMI1-NEXT:    movl $-1, %eax
+; X86-BMI1-NEXT:    shrl %cl, %eax
+; X86-BMI1-NEXT:    xorl %edx, %edx
+; X86-BMI1-NEXT:    testb $32, %cl
+; X86-BMI1-NEXT:    cmovel %eax, %edx
+; X86-BMI1-NEXT:    cmovel %esi, %eax
+; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %edx
+; X86-BMI1-NEXT:    popl %esi
+; X86-BMI1-NEXT:    retl
 ;
 ; X86-BMI2-LABEL: clear_highbits64_c4_commutative:
 ; X86-BMI2:       # %bb.0:
@@ -972,8 +898,9 @@
 ;
 ; X64-BMI2-LABEL: clear_highbits64_c4_commutative:
 ; X64-BMI2:       # %bb.0:
-; X64-BMI2-NEXT:    shlxq %rsi, %rdi, %rax
-; X64-BMI2-NEXT:    shrxq %rsi, %rax, %rax
+; X64-BMI2-NEXT:    movl $64, %eax
+; X64-BMI2-NEXT:    subl %esi, %eax
+; X64-BMI2-NEXT:    bzhiq %rax, %rdi, %rax
 ; X64-BMI2-NEXT:    retq
   %mask = lshr i64 -1, %numhighbits
   %masked = and i64 %val, %mask ; swapped order
@@ -1005,17 +932,18 @@
 ;
 ; X86-BMI2-LABEL: oneuse32:
 ; X86-BMI2:       # %bb.0:
-; X86-BMI2-NEXT:    pushl %esi
+; X86-BMI2-NEXT:    pushl %ebx
 ; X86-BMI2-NEXT:    subl $8, %esp
-; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X86-BMI2-NEXT:    movl $-1, %ecx
-; X86-BMI2-NEXT:    shrxl %eax, %ecx, %esi
-; X86-BMI2-NEXT:    movl %esi, (%esp)
+; X86-BMI2-NEXT:    movb {{[0-9]+}}(%esp), %bl
+; X86-BMI2-NEXT:    movl $-1, %eax
+; X86-BMI2-NEXT:    shrxl %ebx, %eax, %eax
+; X86-BMI2-NEXT:    movl %eax, (%esp)
 ; X86-BMI2-NEXT:    calll use32@PLT
-; X86-BMI2-NEXT:    andl {{[0-9]+}}(%esp), %esi
-; X86-BMI2-NEXT:    movl %esi, %eax
+; X86-BMI2-NEXT:    movl $32, %eax
+; X86-BMI2-NEXT:    subl %ebx, %eax
+; X86-BMI2-NEXT:    bzhil %eax, {{[0-9]+}}(%esp), %eax
 ; X86-BMI2-NEXT:    addl $8, %esp
-; X86-BMI2-NEXT:    popl %esi
+; X86-BMI2-NEXT:    popl %ebx
 ; X86-BMI2-NEXT:    retl
 ;
 ; X64-NOBMI2-LABEL: oneuse32:
@@ -1042,13 +970,14 @@
 ; X64-BMI2-NEXT:    pushq %rbp
 ; X64-BMI2-NEXT:    pushq %rbx
 ; X64-BMI2-NEXT:    pushq %rax
-; X64-BMI2-NEXT:    movl %edi, %ebx
+; X64-BMI2-NEXT:    movl %esi, %ebx
+; X64-BMI2-NEXT:    movl %edi, %ebp
 ; X64-BMI2-NEXT:    movl $-1, %eax
-; X64-BMI2-NEXT:    shrxl %esi, %eax, %ebp
-; X64-BMI2-NEXT:    movl %ebp, %edi
+; X64-BMI2-NEXT:    shrxl %esi, %eax, %edi
 ; X64-BMI2-NEXT:    callq use32@PLT
-; X64-BMI2-NEXT:    andl %ebx, %ebp
-; X64-BMI2-NEXT:    movl %ebp, %eax
+; X64-BMI2-NEXT:    movl $32, %eax
+; X64-BMI2-NEXT:    subl %ebx, %eax
+; X64-BMI2-NEXT:    bzhil %eax, %ebp, %eax
 ; X64-BMI2-NEXT:    addq $8, %rsp
 ; X64-BMI2-NEXT:    popq %rbx
 ; X64-BMI2-NEXT:    popq %rbp
@@ -1060,88 +989,61 @@
 }
 
 define i64 @oneuse64(i64 %val, i64 %numhighbits) nounwind {
-; X86-FALLBACK0-LABEL: oneuse64:
-; X86-FALLBACK0:       # %bb.0:
-; X86-FALLBACK0-NEXT:    pushl %edi
-; X86-FALLBACK0-NEXT:    pushl %esi
-; X86-FALLBACK0-NEXT:    pushl %eax
-; X86-FALLBACK0-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-FALLBACK0-NEXT:    movl $-1, %esi
-; X86-FALLBACK0-NEXT:    movl $-1, %edi
-; X86-FALLBACK0-NEXT:    shrl %cl, %edi
-; X86-FALLBACK0-NEXT:    testb $32, %cl
-; X86-FALLBACK0-NEXT:    je .LBB19_2
-; X86-FALLBACK0-NEXT:  # %bb.1:
-; X86-FALLBACK0-NEXT:    movl %edi, %esi
-; X86-FALLBACK0-NEXT:    xorl %edi, %edi
-; X86-FALLBACK0-NEXT:  .LBB19_2:
-; X86-FALLBACK0-NEXT:    subl $8, %esp
-; X86-FALLBACK0-NEXT:    pushl %edi
-; X86-FALLBACK0-NEXT:    pushl %esi
-; X86-FALLBACK0-NEXT:    calll use64@PLT
-; X86-FALLBACK0-NEXT:    addl $16, %esp
-; X86-FALLBACK0-NEXT:    andl {{[0-9]+}}(%esp), %esi
-; X86-FALLBACK0-NEXT:    andl {{[0-9]+}}(%esp), %edi
-; X86-FALLBACK0-NEXT:    movl %esi, %eax
-; X86-FALLBACK0-NEXT:    movl %edi, %edx
-; X86-FALLBACK0-NEXT:    addl $4, %esp
-; X86-FALLBACK0-NEXT:    popl %esi
-; X86-FALLBACK0-NEXT:    popl %edi
-; X86-FALLBACK0-NEXT:    retl
+; X86-BASELINE-LABEL: oneuse64:
+; X86-BASELINE:       # %bb.0:
+; X86-BASELINE-NEXT:    pushl %edi
+; X86-BASELINE-NEXT:    pushl %esi
+; X86-BASELINE-NEXT:    pushl %eax
+; X86-BASELINE-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BASELINE-NEXT:    movl $-1, %esi
+; X86-BASELINE-NEXT:    movl $-1, %edi
+; X86-BASELINE-NEXT:    shrl %cl, %edi
+; X86-BASELINE-NEXT:    testb $32, %cl
+; X86-BASELINE-NEXT:    je .LBB19_2
+; X86-BASELINE-NEXT:  # %bb.1:
+; X86-BASELINE-NEXT:    movl %edi, %esi
+; X86-BASELINE-NEXT:    xorl %edi, %edi
+; X86-BASELINE-NEXT:  .LBB19_2:
+; X86-BASELINE-NEXT:    subl $8, %esp
+; X86-BASELINE-NEXT:    pushl %edi
+; X86-BASELINE-NEXT:    pushl %esi
+; X86-BASELINE-NEXT:    calll use64@PLT
+; X86-BASELINE-NEXT:    addl $16, %esp
+; X86-BASELINE-NEXT:    andl {{[0-9]+}}(%esp), %esi
+; X86-BASELINE-NEXT:    andl {{[0-9]+}}(%esp), %edi
+; X86-BASELINE-NEXT:    movl %esi, %eax
+; X86-BASELINE-NEXT:    movl %edi, %edx
+; X86-BASELINE-NEXT:    addl $4, %esp
+; X86-BASELINE-NEXT:    popl %esi
+; X86-BASELINE-NEXT:    popl %edi
+; X86-BASELINE-NEXT:    retl
 ;
-; X86-FALLBACK1-LABEL: oneuse64:
-; X86-FALLBACK1:       # %bb.0:
-; X86-FALLBACK1-NEXT:    pushl %edi
-; X86-FALLBACK1-NEXT:    pushl %esi
-; X86-FALLBACK1-NEXT:    pushl %eax
-; X86-FALLBACK1-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-FALLBACK1-NEXT:    movl $-1, %esi
-; X86-FALLBACK1-NEXT:    movl $-1, %eax
-; X86-FALLBACK1-NEXT:    shrl %cl, %eax
-; X86-FALLBACK1-NEXT:    xorl %edi, %edi
-; X86-FALLBACK1-NEXT:    testb $32, %cl
-; X86-FALLBACK1-NEXT:    cmovnel %eax, %esi
-; X86-FALLBACK1-NEXT:    cmovel %eax, %edi
-; X86-FALLBACK1-NEXT:    subl $8, %esp
-; X86-FALLBACK1-NEXT:    pushl %edi
-; X86-FALLBACK1-NEXT:    pushl %esi
-; X86-FALLBACK1-NEXT:    calll use64@PLT
-; X86-FALLBACK1-NEXT:    addl $16, %esp
-; X86-FALLBACK1-NEXT:    andl {{[0-9]+}}(%esp), %esi
-; X86-FALLBACK1-NEXT:    andl {{[0-9]+}}(%esp), %edi
-; X86-FALLBACK1-NEXT:    movl %esi, %eax
-; X86-FALLBACK1-NEXT:    movl %edi, %edx
-; X86-FALLBACK1-NEXT:    addl $4, %esp
-; X86-FALLBACK1-NEXT:    popl %esi
-; X86-FALLBACK1-NEXT:    popl %edi
-; X86-FALLBACK1-NEXT:    retl
-;
-; X86-FALLBACK2-LABEL: oneuse64:
-; X86-FALLBACK2:       # %bb.0:
-; X86-FALLBACK2-NEXT:    pushl %edi
-; X86-FALLBACK2-NEXT:    pushl %esi
-; X86-FALLBACK2-NEXT:    pushl %eax
-; X86-FALLBACK2-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X86-FALLBACK2-NEXT:    movl $-1, %esi
-; X86-FALLBACK2-NEXT:    movl $-1, %eax
-; X86-FALLBACK2-NEXT:    shrl %cl, %eax
-; X86-FALLBACK2-NEXT:    xorl %edi, %edi
-; X86-FALLBACK2-NEXT:    testb $32, %cl
-; X86-FALLBACK2-NEXT:    cmovnel %eax, %esi
-; X86-FALLBACK2-NEXT:    cmovel %eax, %edi
-; X86-FALLBACK2-NEXT:    subl $8, %esp
-; X86-FALLBACK2-NEXT:    pushl %edi
-; X86-FALLBACK2-NEXT:    pushl %esi
-; X86-FALLBACK2-NEXT:    calll use64@PLT
-; X86-FALLBACK2-NEXT:    addl $16, %esp
-; X86-FALLBACK2-NEXT:    andl {{[0-9]+}}(%esp), %esi
-; X86-FALLBACK2-NEXT:    andl {{[0-9]+}}(%esp), %edi
-; X86-FALLBACK2-NEXT:    movl %esi, %eax
-; X86-FALLBACK2-NEXT:    movl %edi, %edx
-; X86-FALLBACK2-NEXT:    addl $4, %esp
-; X86-FALLBACK2-NEXT:    popl %esi
-; X86-FALLBACK2-NEXT:    popl %edi
-; X86-FALLBACK2-NEXT:    retl
+; X86-BMI1-LABEL: oneuse64:
+; X86-BMI1:       # %bb.0:
+; X86-BMI1-NEXT:    pushl %edi
+; X86-BMI1-NEXT:    pushl %esi
+; X86-BMI1-NEXT:    pushl %eax
+; X86-BMI1-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-BMI1-NEXT:    movl $-1, %esi
+; X86-BMI1-NEXT:    movl $-1, %eax
+; X86-BMI1-NEXT:    shrl %cl, %eax
+; X86-BMI1-NEXT:    xorl %edi, %edi
+; X86-BMI1-NEXT:    testb $32, %cl
+; X86-BMI1-NEXT:    cmovnel %eax, %esi
+; X86-BMI1-NEXT:    cmovel %eax, %edi
+; X86-BMI1-NEXT:    subl $8, %esp
+; X86-BMI1-NEXT:    pushl %edi
+; X86-BMI1-NEXT:    pushl %esi
+; X86-BMI1-NEXT:    calll use64@PLT
+; X86-BMI1-NEXT:    addl $16, %esp
+; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %esi
+; X86-BMI1-NEXT:    andl {{[0-9]+}}(%esp), %edi
+; X86-BMI1-NEXT:    movl %esi, %eax
+; X86-BMI1-NEXT:    movl %edi, %edx
+; X86-BMI1-NEXT:    addl $4, %esp
+; X86-BMI1-NEXT:    popl %esi
+; X86-BMI1-NEXT:    popl %edi
+; X86-BMI1-NEXT:    retl
 ;
 ; X86-BMI2-LABEL: oneuse64:
 ; X86-BMI2:       # %bb.0:
@@ -1193,13 +1095,14 @@
 ; X64-BMI2-NEXT:    pushq %r14
 ; X64-BMI2-NEXT:    pushq %rbx
 ; X64-BMI2-NEXT:    pushq %rax
+; X64-BMI2-NEXT:    movq %rsi, %rbx
 ; X64-BMI2-NEXT:    movq %rdi, %r14
 ; X64-BMI2-NEXT:    movq $-1, %rax
-; X64-BMI2-NEXT:    shrxq %rsi, %rax, %rbx
-; X64-BMI2-NEXT:    movq %rbx, %rdi
+; X64-BMI2-NEXT:    shrxq %rsi, %rax, %rdi
 ; X64-BMI2-NEXT:    callq use64@PLT
-; X64-BMI2-NEXT:    andq %r14, %rbx
-; X64-BMI2-NEXT:    movq %rbx, %rax
+; X64-BMI2-NEXT:    movl $64, %eax
+; X64-BMI2-NEXT:    subl %ebx, %eax
+; X64-BMI2-NEXT:    bzhiq %rax, %r14, %rax
 ; X64-BMI2-NEXT:    addq $8, %rsp
 ; X64-BMI2-NEXT:    popq %rbx
 ; X64-BMI2-NEXT:    popq %r14
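
For reference, a before/after sketch of the net effect, drawn from the
clear_highbits32_c0 test above. The IR pattern

  %mask = lshr i32 -1, %numhighbits
  %masked = and i32 %mask, %val

previously selected a dependent SHLX/SHRX pair on BMI2 targets:

  shlxl %esi, %edi, %eax
  shrxl %esi, %eax, %eax

With this change the matcher no longer requires the shift amount to have the
explicit (bitwidth - y) shape: canonicalizeShiftAmt() records the raw amount
and sets NegateNBits, and a single negation is materialized so that BZHI can
keep the low (32 - %numhighbits) bits directly:

  movl $32, %eax
  subl %esi, %eax
  bzhil %eax, %edi, %eax

The NegateNBits path is deliberately gated on Subtarget->hasBMI2(): per the
comment in the patch, spending the extra mov+sub just to feed BMI1's BEXTR
is too unprofitable.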