Index: include/llvm/CodeGen/TargetLowering.h
===================================================================
--- include/llvm/CodeGen/TargetLowering.h
+++ include/llvm/CodeGen/TargetLowering.h
@@ -509,6 +509,16 @@
     return hasAndNotCompare(X);
   }
 
+  /// There are two ways to clear extreme bits (either low or high):
+  ///   Mask:   x & (-1 << y)  (the instcombine canonical form)
+  ///   Shifts: x >> y << y
+  /// Different targets may have different preferences.
+  /// Returns true if the shift variant is preferred.
+  virtual bool preferShiftsToClearExtremeBits(SDValue X) const {
+    // By default, let's assume that everyone prefers masking.
+    return false;
+  }
+
   /// Return true if the target wants to use the optimization that
   /// turns ext(promotableInst1(...(promotableInstN(load)))) into
   /// promotedInst1(...(promotedInstN(ext(load)))).
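
[Illustration, not part of the patch: the equivalence the new hook's doc
comment relies on is easy to sanity-check in plain C++. With unsigned
arithmetic and an in-range shift amount, the mask form and the two-shift
form clear exactly the same bits:]

  // Minimal sketch; exhaustively checks all valid shift amounts for i32.
  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t X = 0xDEADBEEF;
    for (uint32_t Y = 0; Y < 32; ++Y) {
      assert((X & (~UINT32_C(0) << Y)) == ((X >> Y) << Y)); // clear low y bits
      assert((X & (~UINT32_C(0) >> Y)) == ((X << Y) >> Y)); // clear high y bits
    }
  }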
Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -409,6 +409,7 @@
     SDValue foldLogicOfSetCCs(bool IsAnd, SDValue N0, SDValue N1,
                               const SDLoc &DL);
     SDValue unfoldMaskedMerge(SDNode *N);
+    SDValue unfoldExtremeBitClearingToShifts(SDNode *N);
     SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
                           const SDLoc &DL, bool foldBooleans);
     SDValue rebuildSetCC(SDValue N);
@@ -4171,6 +4172,63 @@
   return false;
 }
 
+// Unfold
+//    x &  (-1 'logical shift' y)
+// To
+//    (x 'opposite logical shift' y) 'logical shift' y
+// if it is better for performance.
+SDValue DAGCombiner::unfoldExtremeBitClearingToShifts(SDNode *N) {
+  assert(N->getOpcode() == ISD::AND);
+
+  SDValue N0 = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+
+  // Do we actually prefer shifts over mask?
+  if (!TLI.preferShiftsToClearExtremeBits(N0))
+    return SDValue();
+
+  // Try to match  (-1 '[outer] logical shift' y)
+  unsigned OuterShift;
+  unsigned InnerShift; // The opposite direction to the OuterShift.
+  SDValue Y;           // Shift amount.
+  auto matchMask = [&OuterShift, &InnerShift, &Y](SDValue M) -> bool {
+    if (!M.hasOneUse())
+      return false;
+    switch (OuterShift = M->getOpcode()) {
+    case ISD::SHL:
+      InnerShift = ISD::SRL;
+      break;
+    case ISD::SRL:
+      InnerShift = ISD::SHL;
+      break;
+    default:
+      return false;
+    }
+    if (!isAllOnesConstant(M->getOperand(0)))
+      return false;
+    Y = M->getOperand(1);
+    return true;
+  };
+
+  SDValue X;
+  if (matchMask(N1))
+    X = N0;
+  else if (matchMask(N0))
+    X = N1;
+  else
+    return SDValue();
+
+  SDLoc DL(N);
+  EVT VT = N->getValueType(0);
+
+  //     tmp = x 'opposite logical shift' y
+  SDValue T0 = DAG.getNode(InnerShift, DL, VT, X, Y);
+  //     ret = tmp 'logical shift' y
+  SDValue T1 = DAG.getNode(OuterShift, DL, VT, T0, Y);
+
+  return T1;
+}
+
 SDValue DAGCombiner::visitAND(SDNode *N) {
   SDValue N0 = N->getOperand(0);
   SDValue N1 = N->getOperand(1);
@@ -4468,6 +4526,9 @@
     return BSwap;
   }
 
+  if (SDValue Shifts = unfoldExtremeBitClearingToShifts(N))
+    return Shifts;
+
   return SDValue();
 }
 
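
[Illustration, not part of the patch: in source terms, the combine fires on
the instcombine-canonical masked form. A hypothetical C++ equivalent of the
clear_highbits32_c0 test below; per the updated CHECK lines, with BMI2 this
now compiles to a shlx/shrx pair instead of materializing a -1 mask:]

  // New codegen (from the CHECK lines):  shlxl %esi, %edi, %eax
  //                                      shrxl %esi, %eax, %eax
  // Old codegen:  movl $-1, %eax; shrxl %esi, %eax, %eax; andl %edi, %eax
  // Assumes 0 <= NumHighBits < 32; larger shift amounts are UB in C++.
  unsigned clear_highbits32(unsigned Val, unsigned NumHighBits) {
    unsigned Mask = ~0u >> NumHighBits; // canonical mask form
    return Val & Mask;                  // unfolded to (Val << N) >> N
  }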
Index: lib/Target/X86/X86ISelLowering.h
===================================================================
--- lib/Target/X86/X86ISelLowering.h
+++ lib/Target/X86/X86ISelLowering.h
@@ -831,6 +831,8 @@
 
     bool hasAndNot(SDValue Y) const override;
 
+    bool preferShiftsToClearExtremeBits(SDValue Y) const override;
+
    bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
       return VT.isScalarInteger();
     }
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -4785,6 +4785,22 @@
   return Subtarget.hasSSE2();
 }
 
+bool X86TargetLowering::preferShiftsToClearExtremeBits(SDValue Y) const {
+  EVT VT = Y.getValueType();
+
+  // For vectors, we don't have a preference, but we probably want a mask.
+  if (VT.isVector())
+    return false;
+
+  // We want BMI2's SHLX/SHRX (Shift Without Affecting Flags).
+  if (!Subtarget.hasBMI2())
+    return false;
+
+  // There are only 32-bit and 64-bit forms of SHLX/SHRX,
+  // and the 64-bit form is only preferred on 64-bit targets.
+  return (VT == MVT::i32 || (VT == MVT::i64 && Subtarget.is64Bit()));
+}
+
 MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
   MVT VT = MVT::getIntegerVT(NumBits);
   if (isTypeLegal(VT))
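
[Illustration, not part of the patch: a minimal sketch of how another
target could opt in to this hook; "Foo" and its heuristic are placeholders,
not an existing target:]

  // Hypothetical override: prefer the two-shift form for any legal
  // scalar integer type, keep the mask form for vectors.
  bool FooTargetLowering::preferShiftsToClearExtremeBits(SDValue X) const {
    EVT VT = X.getValueType();
    return VT.isScalarInteger() && isTypeLegal(VT);
  }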
Index: test/CodeGen/X86/clear-highbits.ll
===================================================================
--- test/CodeGen/X86/clear-highbits.ll
+++ test/CodeGen/X86/clear-highbits.ll
@@ -33,9 +33,8 @@
 ; X86-TRANSFORM-LABEL: clear_highbits32_c0:
 ; X86-TRANSFORM: # %bb.0:
 ; X86-TRANSFORM-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-TRANSFORM-NEXT: movl $-1, %ecx
+; X86-TRANSFORM-NEXT: shlxl %eax, {{[0-9]+}}(%esp), %ecx
 ; X86-TRANSFORM-NEXT: shrxl %eax, %ecx, %eax
-; X86-TRANSFORM-NEXT: andl {{[0-9]+}}(%esp), %eax
 ; X86-TRANSFORM-NEXT: retl
 ;
 ; X64-NOTRANSFORM-LABEL: clear_highbits32_c0:
@@ -48,9 +47,8 @@
 ;
 ; X64-TRANSFORM-LABEL: clear_highbits32_c0:
 ; X64-TRANSFORM: # %bb.0:
-; X64-TRANSFORM-NEXT: movl $-1, %eax
+; X64-TRANSFORM-NEXT: shlxl %esi, %edi, %eax
 ; X64-TRANSFORM-NEXT: shrxl %esi, %eax, %eax
-; X64-TRANSFORM-NEXT: andl %edi, %eax
 ; X64-TRANSFORM-NEXT: retq
   %mask = lshr i32 -1, %numhighbits
   %masked = and i32 %mask, %val
@@ -69,9 +67,8 @@
 ; X86-TRANSFORM-LABEL: clear_highbits32_c1_indexzext:
 ; X86-TRANSFORM: # %bb.0:
 ; X86-TRANSFORM-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-TRANSFORM-NEXT: movl $-1, %ecx
+; X86-TRANSFORM-NEXT: shlxl %eax, {{[0-9]+}}(%esp), %ecx
 ; X86-TRANSFORM-NEXT: shrxl %eax, %ecx, %eax
-; X86-TRANSFORM-NEXT: andl {{[0-9]+}}(%esp), %eax
 ; X86-TRANSFORM-NEXT: retl
 ;
 ; X64-NOTRANSFORM-LABEL: clear_highbits32_c1_indexzext:
@@ -84,9 +81,8 @@
 ;
 ; X64-TRANSFORM-LABEL: clear_highbits32_c1_indexzext:
 ; X64-TRANSFORM: # %bb.0:
-; X64-TRANSFORM-NEXT: movl $-1, %eax
+; X64-TRANSFORM-NEXT: shlxl %esi, %edi, %eax
 ; X64-TRANSFORM-NEXT: shrxl %esi, %eax, %eax
-; X64-TRANSFORM-NEXT: andl %edi, %eax
 ; X64-TRANSFORM-NEXT: retq
   %sh_prom = zext i8 %numhighbits to i32
   %mask = lshr i32 -1, %sh_prom
@@ -106,11 +102,10 @@
 ;
 ; X86-TRANSFORM-LABEL: clear_highbits32_c2_load:
 ; X86-TRANSFORM: # %bb.0:
-; X86-TRANSFORM-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-TRANSFORM-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-TRANSFORM-NEXT: movl $-1, %edx
-; X86-TRANSFORM-NEXT: shrxl %eax, %edx, %eax
-; X86-TRANSFORM-NEXT: andl (%ecx), %eax
+; X86-TRANSFORM-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-TRANSFORM-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-TRANSFORM-NEXT: shlxl %ecx, (%eax), %eax
+; X86-TRANSFORM-NEXT: shrxl %ecx, %eax, %eax
 ; X86-TRANSFORM-NEXT: retl
 ;
 ; X64-NOTRANSFORM-LABEL: clear_highbits32_c2_load:
@@ -123,9 +118,8 @@
 ;
 ; X64-TRANSFORM-LABEL: clear_highbits32_c2_load:
 ; X64-TRANSFORM: # %bb.0:
-; X64-TRANSFORM-NEXT: movl $-1, %eax
+; X64-TRANSFORM-NEXT: shlxl %esi, (%rdi), %eax
 ; X64-TRANSFORM-NEXT: shrxl %esi, %eax, %eax
-; X64-TRANSFORM-NEXT: andl (%rdi), %eax
 ; X64-TRANSFORM-NEXT: retq
   %val = load i32, i32* %w
   %mask = lshr i32 -1, %numhighbits
@@ -145,11 +139,10 @@
 ;
 ; X86-TRANSFORM-LABEL: clear_highbits32_c3_load_indexzext:
 ; X86-TRANSFORM: # %bb.0:
-; X86-TRANSFORM-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-TRANSFORM-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-TRANSFORM-NEXT: movl $-1, %edx
-; X86-TRANSFORM-NEXT: shrxl %eax, %edx, %eax
-; X86-TRANSFORM-NEXT: andl (%ecx), %eax
+; X86-TRANSFORM-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-TRANSFORM-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-TRANSFORM-NEXT: shlxl %ecx, (%eax), %eax
+; X86-TRANSFORM-NEXT: shrxl %ecx, %eax, %eax
 ; X86-TRANSFORM-NEXT: retl
 ;
 ; X64-NOTRANSFORM-LABEL: clear_highbits32_c3_load_indexzext:
@@ -162,9 +155,8 @@
 ;
 ; X64-TRANSFORM-LABEL: clear_highbits32_c3_load_indexzext:
 ; X64-TRANSFORM: # %bb.0:
-; X64-TRANSFORM-NEXT: movl $-1, %eax
+; X64-TRANSFORM-NEXT: shlxl %esi, (%rdi), %eax
 ; X64-TRANSFORM-NEXT: shrxl %esi, %eax, %eax
-; X64-TRANSFORM-NEXT: andl (%rdi), %eax
 ; X64-TRANSFORM-NEXT: retq
   %val = load i32, i32* %w
   %sh_prom = zext i8 %numhighbits to i32
@@ -185,9 +177,8 @@
 ; X86-TRANSFORM-LABEL: clear_highbits32_c4_commutative:
 ; X86-TRANSFORM: # %bb.0:
 ; X86-TRANSFORM-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-TRANSFORM-NEXT: movl $-1, %ecx
+; X86-TRANSFORM-NEXT: shlxl %eax, {{[0-9]+}}(%esp), %ecx
 ; X86-TRANSFORM-NEXT: shrxl %eax, %ecx, %eax
-; X86-TRANSFORM-NEXT: andl {{[0-9]+}}(%esp), %eax
 ; X86-TRANSFORM-NEXT: retl
 ;
 ; X64-NOTRANSFORM-LABEL: clear_highbits32_c4_commutative:
@@ -200,9 +191,8 @@
 ;
 ; X64-TRANSFORM-LABEL: clear_highbits32_c4_commutative:
 ; X64-TRANSFORM: # %bb.0:
-; X64-TRANSFORM-NEXT: movl $-1, %eax
+; X64-TRANSFORM-NEXT: shlxl %esi, %edi, %eax
 ; X64-TRANSFORM-NEXT: shrxl %esi, %eax, %eax
-; X64-TRANSFORM-NEXT: andl %edi, %eax
 ; X64-TRANSFORM-NEXT: retq
   %mask = lshr i32 -1, %numhighbits
   %masked = and i32 %val, %mask ; swapped order
@@ -255,9 +245,8 @@
 ;
 ; X64-TRANSFORM-LABEL: clear_highbits64_c0:
 ; X64-TRANSFORM: # %bb.0:
-; X64-TRANSFORM-NEXT: movq $-1, %rax
+; X64-TRANSFORM-NEXT: shlxq %rsi, %rdi, %rax
 ; X64-TRANSFORM-NEXT: shrxq %rsi, %rax, %rax
-; X64-TRANSFORM-NEXT: andq %rdi, %rax
 ; X64-TRANSFORM-NEXT: retq
   %mask = lshr i64 -1, %numhighbits
   %masked = and i64 %mask, %val
@@ -309,9 +298,8 @@
 ; X64-TRANSFORM-LABEL: clear_highbits64_c1_indexzext:
 ; X64-TRANSFORM: # %bb.0:
 ; X64-TRANSFORM-NEXT: # kill: def $esi killed $esi def $rsi
-; X64-TRANSFORM-NEXT: movq $-1, %rax
+; X64-TRANSFORM-NEXT: shlxq %rsi, %rdi, %rax
 ; X64-TRANSFORM-NEXT: shrxq %rsi, %rax, %rax
-; X64-TRANSFORM-NEXT: andq %rdi, %rax
 ; X64-TRANSFORM-NEXT: retq
   %sh_prom = zext i8 %numhighbits to i64
   %mask = lshr i64 -1, %sh_prom
@@ -369,9 +357,8 @@
 ;
 ; X64-TRANSFORM-LABEL: clear_highbits64_c2_load:
 ; X64-TRANSFORM: # %bb.0:
-; X64-TRANSFORM-NEXT: movq $-1, %rax
+; X64-TRANSFORM-NEXT: shlxq %rsi, (%rdi), %rax
 ; X64-TRANSFORM-NEXT: shrxq %rsi, %rax, %rax
-; X64-TRANSFORM-NEXT: andq (%rdi), %rax
 ; X64-TRANSFORM-NEXT: retq
   %val = load i64, i64* %w
   %mask = lshr i64 -1, %numhighbits
@@ -430,9 +417,8 @@
 ; X64-TRANSFORM-LABEL: clear_highbits64_c3_load_indexzext:
 ; X64-TRANSFORM: # %bb.0:
 ; X64-TRANSFORM-NEXT: # kill: def $esi killed $esi def $rsi
-; X64-TRANSFORM-NEXT: movq $-1, %rax
+; X64-TRANSFORM-NEXT: shlxq %rsi, (%rdi), %rax
 ; X64-TRANSFORM-NEXT: shrxq %rsi, %rax, %rax
-; X64-TRANSFORM-NEXT: andq (%rdi), %rax
 ; X64-TRANSFORM-NEXT: retq
   %val = load i64, i64* %w
   %sh_prom = zext i8 %numhighbits to i64
@@ -485,9 +471,8 @@
 ;
 ; X64-TRANSFORM-LABEL: clear_highbits64_c4_commutative:
 ; X64-TRANSFORM: # %bb.0:
-; X64-TRANSFORM-NEXT: movq $-1, %rax
+; X64-TRANSFORM-NEXT: shlxq %rsi, %rdi, %rax
 ; X64-TRANSFORM-NEXT: shrxq %rsi, %rax, %rax
-; X64-TRANSFORM-NEXT: andq %rdi, %rax
 ; X64-TRANSFORM-NEXT: retq
   %mask = lshr i64 -1, %numhighbits
   %masked = and i64 %val, %mask ; swapped order
Index: test/CodeGen/X86/clear-lowbits.ll
===================================================================
--- test/CodeGen/X86/clear-lowbits.ll
+++ test/CodeGen/X86/clear-lowbits.ll
@@ -33,9 +33,8 @@
 ; X86-TRANSFORM-LABEL: clear_lowbits32_c0:
 ; X86-TRANSFORM: # %bb.0:
 ; X86-TRANSFORM-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-TRANSFORM-NEXT: movl $-1, %ecx
+; X86-TRANSFORM-NEXT: shrxl %eax, {{[0-9]+}}(%esp), %ecx
 ; X86-TRANSFORM-NEXT: shlxl %eax, %ecx, %eax
-; X86-TRANSFORM-NEXT: andl {{[0-9]+}}(%esp), %eax
 ; X86-TRANSFORM-NEXT: retl
 ;
 ; X64-NOTRANSFORM-LABEL: clear_lowbits32_c0:
@@ -48,9 +47,8 @@
 ;
 ; X64-TRANSFORM-LABEL: clear_lowbits32_c0:
 ; X64-TRANSFORM: # %bb.0:
-; X64-TRANSFORM-NEXT: movl $-1, %eax
+; X64-TRANSFORM-NEXT: shrxl %esi, %edi, %eax
 ; X64-TRANSFORM-NEXT: shlxl %esi, %eax, %eax
-; X64-TRANSFORM-NEXT: andl %edi, %eax
 ; X64-TRANSFORM-NEXT: retq
   %mask = shl i32 -1, %numlowbits
   %masked = and i32 %mask, %val
@@ -69,9 +67,8 @@
 ; X86-TRANSFORM-LABEL: clear_lowbits32_c1_indexzext:
 ; X86-TRANSFORM: # %bb.0:
 ; X86-TRANSFORM-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-TRANSFORM-NEXT: movl $-1, %ecx
+; X86-TRANSFORM-NEXT: shrxl %eax, {{[0-9]+}}(%esp), %ecx
 ; X86-TRANSFORM-NEXT: shlxl %eax, %ecx, %eax
-; X86-TRANSFORM-NEXT: andl {{[0-9]+}}(%esp), %eax
 ; X86-TRANSFORM-NEXT: retl
 ;
 ; X64-NOTRANSFORM-LABEL: clear_lowbits32_c1_indexzext:
@@ -84,9 +81,8 @@
 ;
 ; X64-TRANSFORM-LABEL: clear_lowbits32_c1_indexzext:
 ; X64-TRANSFORM: # %bb.0:
-; X64-TRANSFORM-NEXT: movl $-1, %eax
+; X64-TRANSFORM-NEXT: shrxl %esi, %edi, %eax
 ; X64-TRANSFORM-NEXT: shlxl %esi, %eax, %eax
-; X64-TRANSFORM-NEXT: andl %edi, %eax
 ; X64-TRANSFORM-NEXT: retq
   %sh_prom = zext i8 %numlowbits to i32
   %mask = shl i32 -1, %sh_prom
@@ -106,11 +102,10 @@
 ;
 ; X86-TRANSFORM-LABEL: clear_lowbits32_c2_load:
 ; X86-TRANSFORM: # %bb.0:
-; X86-TRANSFORM-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-TRANSFORM-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-TRANSFORM-NEXT: movl $-1, %edx
-; X86-TRANSFORM-NEXT: shlxl %eax, %edx, %eax
-; X86-TRANSFORM-NEXT: andl (%ecx), %eax
+; X86-TRANSFORM-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-TRANSFORM-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-TRANSFORM-NEXT: shrxl %ecx, (%eax), %eax
+; X86-TRANSFORM-NEXT: shlxl %ecx, %eax, %eax
 ; X86-TRANSFORM-NEXT: retl
 ;
 ; X64-NOTRANSFORM-LABEL: clear_lowbits32_c2_load:
@@ -123,9 +118,8 @@
 ;
 ; X64-TRANSFORM-LABEL: clear_lowbits32_c2_load:
 ; X64-TRANSFORM: # %bb.0:
-; X64-TRANSFORM-NEXT: movl $-1, %eax
+; X64-TRANSFORM-NEXT: shrxl %esi, (%rdi), %eax
 ; X64-TRANSFORM-NEXT: shlxl %esi, %eax, %eax
-; X64-TRANSFORM-NEXT: andl (%rdi), %eax
 ; X64-TRANSFORM-NEXT: retq
   %val = load i32, i32* %w
   %mask = shl i32 -1, %numlowbits
@@ -145,11 +139,10 @@
 ;
 ; X86-TRANSFORM-LABEL: clear_lowbits32_c3_load_indexzext:
 ; X86-TRANSFORM: # %bb.0:
-; X86-TRANSFORM-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-TRANSFORM-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-TRANSFORM-NEXT: movl $-1, %edx
-; X86-TRANSFORM-NEXT: shlxl %eax, %edx, %eax
-; X86-TRANSFORM-NEXT: andl (%ecx), %eax
+; X86-TRANSFORM-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-TRANSFORM-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-TRANSFORM-NEXT: shrxl %ecx, (%eax), %eax
+; X86-TRANSFORM-NEXT: shlxl %ecx, %eax, %eax
 ; X86-TRANSFORM-NEXT: retl
 ;
 ; X64-NOTRANSFORM-LABEL: clear_lowbits32_c3_load_indexzext:
@@ -162,9 +155,8 @@
 ;
 ; X64-TRANSFORM-LABEL: clear_lowbits32_c3_load_indexzext:
 ; X64-TRANSFORM: # %bb.0:
-; X64-TRANSFORM-NEXT: movl $-1, %eax
+; X64-TRANSFORM-NEXT: shrxl %esi, (%rdi), %eax
 ; X64-TRANSFORM-NEXT: shlxl %esi, %eax, %eax
-; X64-TRANSFORM-NEXT: andl (%rdi), %eax
 ; X64-TRANSFORM-NEXT: retq
   %val = load i32, i32* %w
   %sh_prom = zext i8 %numlowbits to i32
@@ -185,9 +177,8 @@
 ; X86-TRANSFORM-LABEL: clear_lowbits32_c4_commutative:
 ; X86-TRANSFORM: # %bb.0:
 ; X86-TRANSFORM-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-TRANSFORM-NEXT: movl $-1, %ecx
+; X86-TRANSFORM-NEXT: shrxl %eax, {{[0-9]+}}(%esp), %ecx
 ; X86-TRANSFORM-NEXT: shlxl %eax, %ecx, %eax
-; X86-TRANSFORM-NEXT: andl {{[0-9]+}}(%esp), %eax
 ; X86-TRANSFORM-NEXT: retl
 ;
 ; X64-NOTRANSFORM-LABEL: clear_lowbits32_c4_commutative:
@@ -200,9 +191,8 @@
 ;
 ; X64-TRANSFORM-LABEL: clear_lowbits32_c4_commutative:
 ; X64-TRANSFORM: # %bb.0:
-; X64-TRANSFORM-NEXT: movl $-1, %eax
+; X64-TRANSFORM-NEXT: shrxl %esi, %edi, %eax
 ; X64-TRANSFORM-NEXT: shlxl %esi, %eax, %eax
-; X64-TRANSFORM-NEXT: andl %edi, %eax
 ; X64-TRANSFORM-NEXT: retq
   %mask = shl i32 -1, %numlowbits
   %masked = and i32 %val, %mask ; swapped order
@@ -255,9 +245,8 @@
 ;
 ; X64-TRANSFORM-LABEL: clear_lowbits64_c0:
 ; X64-TRANSFORM: # %bb.0:
-; X64-TRANSFORM-NEXT: movq $-1, %rax
+; X64-TRANSFORM-NEXT: shrxq %rsi, %rdi, %rax
 ; X64-TRANSFORM-NEXT: shlxq %rsi, %rax, %rax
-; X64-TRANSFORM-NEXT: andq %rdi, %rax
 ; X64-TRANSFORM-NEXT: retq
   %mask = shl i64 -1, %numlowbits
   %masked = and i64 %mask, %val
@@ -309,9 +298,8 @@
 ; X64-TRANSFORM-LABEL: clear_lowbits64_c1_indexzext:
 ; X64-TRANSFORM: # %bb.0:
 ; X64-TRANSFORM-NEXT: # kill: def $esi killed $esi def $rsi
-; X64-TRANSFORM-NEXT: movq $-1, %rax
+; X64-TRANSFORM-NEXT: shrxq %rsi, %rdi, %rax
 ; X64-TRANSFORM-NEXT: shlxq %rsi, %rax, %rax
-; X64-TRANSFORM-NEXT: andq %rdi, %rax
 ; X64-TRANSFORM-NEXT: retq
   %sh_prom = zext i8 %numlowbits to i64
   %mask = shl i64 -1, %sh_prom
@@ -369,9 +357,8 @@
 ;
 ; X64-TRANSFORM-LABEL: clear_lowbits64_c2_load:
 ; X64-TRANSFORM: # %bb.0:
-; X64-TRANSFORM-NEXT: movq $-1, %rax
+; X64-TRANSFORM-NEXT: shrxq %rsi, (%rdi), %rax
 ; X64-TRANSFORM-NEXT: shlxq %rsi, %rax, %rax
-; X64-TRANSFORM-NEXT: andq (%rdi), %rax
 ; X64-TRANSFORM-NEXT: retq
   %val = load i64, i64* %w
   %mask = shl i64 -1, %numlowbits
@@ -430,9 +417,8 @@
 ; X64-TRANSFORM-LABEL: clear_lowbits64_c3_load_indexzext:
 ; X64-TRANSFORM: # %bb.0:
 ; X64-TRANSFORM-NEXT: # kill: def $esi killed $esi def $rsi
-; X64-TRANSFORM-NEXT: movq $-1, %rax
+; X64-TRANSFORM-NEXT: shrxq %rsi, (%rdi), %rax
 ; X64-TRANSFORM-NEXT: shlxq %rsi, %rax, %rax
-; X64-TRANSFORM-NEXT: andq (%rdi), %rax
 ; X64-TRANSFORM-NEXT: retq
   %val = load i64, i64* %w
   %sh_prom = zext i8 %numlowbits to i64
@@ -485,9 +471,8 @@
 ;
 ; X64-TRANSFORM-LABEL: clear_lowbits64_c4_commutative:
 ; X64-TRANSFORM: # %bb.0:
-; X64-TRANSFORM-NEXT: movq $-1, %rax
+; X64-TRANSFORM-NEXT: shrxq %rsi, %rdi, %rax
 ; X64-TRANSFORM-NEXT: shlxq %rsi, %rax, %rax
-; X64-TRANSFORM-NEXT: andq %rdi, %rax
 ; X64-TRANSFORM-NEXT: retq
   %mask = shl i64 -1, %numlowbits
   %masked = and i64 %val, %mask ; swapped order
@@ -513,9 +498,8 @@
 ; X86-TRANSFORM: # %bb.0:
 ; X86-TRANSFORM-NEXT: movl $32, %eax
 ; X86-TRANSFORM-NEXT: subl {{[0-9]+}}(%esp), %eax
-; X86-TRANSFORM-NEXT: movl $-1, %ecx
+; X86-TRANSFORM-NEXT: shrxl %eax, {{[0-9]+}}(%esp), %ecx
 ; X86-TRANSFORM-NEXT: shlxl %eax, %ecx, %eax
-; X86-TRANSFORM-NEXT: andl {{[0-9]+}}(%esp), %eax
 ; X86-TRANSFORM-NEXT: retl
 ;
 ; X64-NOTRANSFORM-LABEL: clear_lowbits32_ic0:
@@ -532,9 +516,8 @@
 ; X64-TRANSFORM: # %bb.0:
 ; X64-TRANSFORM-NEXT: movl $32, %eax
 ; X64-TRANSFORM-NEXT: subl %esi, %eax
-; X64-TRANSFORM-NEXT: movl $-1, %ecx
+; X64-TRANSFORM-NEXT: shrxl %eax, %edi, %ecx
 ; X64-TRANSFORM-NEXT: shlxl %eax, %ecx, %eax
-; X64-TRANSFORM-NEXT: andl %edi, %eax
 ; X64-TRANSFORM-NEXT: retq
   %numhighbits = sub i32 32, %numlowbits
   %mask = shl i32 -1, %numhighbits
@@ -556,9 +539,8 @@
 ; X86-TRANSFORM: # %bb.0:
 ; X86-TRANSFORM-NEXT: movb $32, %al
 ; X86-TRANSFORM-NEXT: subb {{[0-9]+}}(%esp), %al
-; X86-TRANSFORM-NEXT: movl $-1, %ecx
+; X86-TRANSFORM-NEXT: shrxl %eax, {{[0-9]+}}(%esp), %ecx
 ; X86-TRANSFORM-NEXT: shlxl %eax, %ecx, %eax
-; X86-TRANSFORM-NEXT: andl {{[0-9]+}}(%esp), %eax
 ; X86-TRANSFORM-NEXT: retl
 ;
 ; X64-NOTRANSFORM-LABEL: clear_lowbits32_ic1_indexzext:
@@ -574,9 +556,8 @@
 ; X64-TRANSFORM: # %bb.0:
 ; X64-TRANSFORM-NEXT: movb $32, %al
 ; X64-TRANSFORM-NEXT: subb %sil, %al
-; X64-TRANSFORM-NEXT: movl $-1, %ecx
+; X64-TRANSFORM-NEXT: shrxl %eax, %edi, %ecx
 ; X64-TRANSFORM-NEXT: shlxl %eax, %ecx, %eax
-; X64-TRANSFORM-NEXT: andl %edi, %eax
 ; X64-TRANSFORM-NEXT: retq
   %numhighbits = sub i8 32, %numlowbits
   %sh_prom = zext i8 %numhighbits to i32
@@ -599,12 +580,11 @@
 ;
 ; X86-TRANSFORM-LABEL: clear_lowbits32_ic2_load:
 ; X86-TRANSFORM: # %bb.0:
-; X86-TRANSFORM-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-TRANSFORM-NEXT: movl $32, %eax
-; X86-TRANSFORM-NEXT: subl {{[0-9]+}}(%esp), %eax
-; X86-TRANSFORM-NEXT: movl $-1, %edx
-; X86-TRANSFORM-NEXT: shlxl %eax, %edx, %eax
-; X86-TRANSFORM-NEXT: andl (%ecx), %eax
+; X86-TRANSFORM-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-TRANSFORM-NEXT: movl $32, %ecx
+; X86-TRANSFORM-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-TRANSFORM-NEXT: shrxl %ecx, (%eax), %eax
+; X86-TRANSFORM-NEXT: shlxl %ecx, %eax, %eax
 ; X86-TRANSFORM-NEXT: retl
 ;
 ; X64-NOTRANSFORM-LABEL: clear_lowbits32_ic2_load:
@@ -621,9 +601,8 @@
 ; X64-TRANSFORM: # %bb.0:
 ; X64-TRANSFORM-NEXT: movl $32, %eax
 ; X64-TRANSFORM-NEXT: subl %esi, %eax
-; X64-TRANSFORM-NEXT: movl $-1, %ecx
+; X64-TRANSFORM-NEXT: shrxl %eax, (%rdi), %ecx
 ; X64-TRANSFORM-NEXT: shlxl %eax, %ecx, %eax
-; X64-TRANSFORM-NEXT: andl (%rdi), %eax
 ; X64-TRANSFORM-NEXT: retq
   %val = load i32, i32* %w
   %numhighbits = sub i32 32, %numlowbits
@@ -645,12 +624,11 @@
 ;
 ; X86-TRANSFORM-LABEL: clear_lowbits32_ic3_load_indexzext:
 ; X86-TRANSFORM: # %bb.0:
-; X86-TRANSFORM-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-TRANSFORM-NEXT: movb $32, %al
-; X86-TRANSFORM-NEXT: subb {{[0-9]+}}(%esp), %al
-; X86-TRANSFORM-NEXT: movl $-1, %edx
-; X86-TRANSFORM-NEXT: shlxl %eax, %edx, %eax
-; X86-TRANSFORM-NEXT: andl (%ecx), %eax
+; X86-TRANSFORM-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-TRANSFORM-NEXT: movb $32, %cl
+; X86-TRANSFORM-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-TRANSFORM-NEXT: shrxl %ecx, (%eax), %eax
+; X86-TRANSFORM-NEXT: shlxl %ecx, %eax, %eax
 ; X86-TRANSFORM-NEXT: retl
 ;
 ; X64-NOTRANSFORM-LABEL: clear_lowbits32_ic3_load_indexzext:
@@ -666,9 +644,8 @@
 ; X64-TRANSFORM: # %bb.0:
 ; X64-TRANSFORM-NEXT: movb $32, %al
 ; X64-TRANSFORM-NEXT: subb %sil, %al
-; X64-TRANSFORM-NEXT: movl $-1, %ecx
+; X64-TRANSFORM-NEXT: shrxl %eax, (%rdi), %ecx
 ; X64-TRANSFORM-NEXT: shlxl %eax, %ecx, %eax
-; X64-TRANSFORM-NEXT: andl (%rdi), %eax
 ; X64-TRANSFORM-NEXT: retq
   %val = load i32, i32* %w
   %numhighbits = sub i8 32, %numlowbits
@@ -693,9 +670,8 @@
 ; X86-TRANSFORM: # %bb.0:
 ; X86-TRANSFORM-NEXT: movl $32, %eax
 ; X86-TRANSFORM-NEXT: subl {{[0-9]+}}(%esp), %eax
-; X86-TRANSFORM-NEXT: movl $-1, %ecx
+; X86-TRANSFORM-NEXT: shrxl %eax, {{[0-9]+}}(%esp), %ecx
 ; X86-TRANSFORM-NEXT: shlxl %eax, %ecx, %eax
-; X86-TRANSFORM-NEXT: andl {{[0-9]+}}(%esp), %eax
 ; X86-TRANSFORM-NEXT: retl
 ;
 ; X64-NOTRANSFORM-LABEL: clear_lowbits32_ic4_commutative:
@@ -712,9 +688,8 @@
 ; X64-TRANSFORM: # %bb.0:
 ; X64-TRANSFORM-NEXT: movl $32, %eax
 ; X64-TRANSFORM-NEXT: subl %esi, %eax
-; X64-TRANSFORM-NEXT: movl $-1, %ecx
+; X64-TRANSFORM-NEXT: shrxl %eax, %edi, %ecx
 ; X64-TRANSFORM-NEXT: shlxl %eax, %ecx, %eax
-; X64-TRANSFORM-NEXT: andl %edi, %eax
 ; X64-TRANSFORM-NEXT: retq
   %numhighbits = sub i32 32, %numlowbits
   %mask = shl i32 -1, %numhighbits
@@ -774,9 +749,8 @@
 ; X64-TRANSFORM: # %bb.0:
 ; X64-TRANSFORM-NEXT: movl $64, %eax
 ; X64-TRANSFORM-NEXT: subl %esi, %eax
-; X64-TRANSFORM-NEXT: movq $-1, %rcx
+; X64-TRANSFORM-NEXT: shrxq %rax, %rdi, %rcx
 ; X64-TRANSFORM-NEXT: shlxq %rax, %rcx, %rax
-; X64-TRANSFORM-NEXT: andq %rdi, %rax
 ; X64-TRANSFORM-NEXT: retq
   %numhighbits = sub i64 64, %numlowbits
   %mask = shl i64 -1, %numhighbits
@@ -833,9 +807,8 @@
 ; X64-TRANSFORM: # %bb.0:
 ; X64-TRANSFORM-NEXT: movb $64, %al
 ; X64-TRANSFORM-NEXT: subb %sil, %al
-; X64-TRANSFORM-NEXT: movq $-1, %rcx
+; X64-TRANSFORM-NEXT: shrxq %rax, %rdi, %rcx
 ; X64-TRANSFORM-NEXT: shlxq %rax, %rcx, %rax
-; X64-TRANSFORM-NEXT: andq %rdi, %rax
 ; X64-TRANSFORM-NEXT: retq
   %numhighbits = sub i8 64, %numlowbits
   %sh_prom = zext i8 %numhighbits to i64
@@ -900,9 +873,8 @@
 ; X64-TRANSFORM: # %bb.0:
 ; X64-TRANSFORM-NEXT: movl $64, %eax
 ; X64-TRANSFORM-NEXT: subl %esi, %eax
-; X64-TRANSFORM-NEXT: movq $-1, %rcx
+; X64-TRANSFORM-NEXT: shrxq %rax, (%rdi), %rcx
 ; X64-TRANSFORM-NEXT: shlxq %rax, %rcx, %rax
-; X64-TRANSFORM-NEXT: andq (%rdi), %rax
 ; X64-TRANSFORM-NEXT: retq
   %val = load i64, i64* %w
   %numhighbits = sub i64 64, %numlowbits
@@ -966,9 +938,8 @@
 ; X64-TRANSFORM: # %bb.0:
 ; X64-TRANSFORM-NEXT: movb $64, %al
 ; X64-TRANSFORM-NEXT: subb %sil, %al
-; X64-TRANSFORM-NEXT: movq $-1, %rcx
+; X64-TRANSFORM-NEXT: shrxq %rax, (%rdi), %rcx
 ; X64-TRANSFORM-NEXT: shlxq %rax, %rcx, %rax
-; X64-TRANSFORM-NEXT: andq (%rdi), %rax
 ; X64-TRANSFORM-NEXT: retq
   %val = load i64, i64* %w
   %numhighbits = sub i8 64, %numlowbits
@@ -1028,9 +999,8 @@
 ; X64-TRANSFORM: # %bb.0:
 ; X64-TRANSFORM-NEXT: movl $64, %eax
 ; X64-TRANSFORM-NEXT: subl %esi, %eax
-; X64-TRANSFORM-NEXT: movq $-1, %rcx
+; X64-TRANSFORM-NEXT: shrxq %rax, %rdi, %rcx
 ; X64-TRANSFORM-NEXT: shlxq %rax, %rcx, %rax
-; X64-TRANSFORM-NEXT: andq %rdi, %rax
 ; X64-TRANSFORM-NEXT: retq
   %numhighbits = sub i64 64, %numlowbits
   %mask = shl i64 -1, %numhighbits