Index: lib/Transforms/InstCombine/InstCombineShifts.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -20,6 +20,37 @@
 
 #define DEBUG_TYPE "instcombine"
 
+// Given pattern:
+//   (x shiftopcode Q) shiftopcode K
+// we should rewrite it as
+//   x shiftopcode (Q+K)  iff (Q+K) u< bitwidth(x)
+// This is valid for any shift, but they must be identical.
+static Instruction *
+reassociateShiftAmtsOfTwoSameDirectionShifts(BinaryOperator *OuterShift,
+                                             const SimplifyQuery &SQ) {
+  // Look for: (x shiftopcode ShAmt0) shiftopcode ShAmt1
+  Value *X, *ShAmt0, *ShAmt1;
+  if (!match(OuterShift,
+             m_Shift(m_Shift(m_Value(X), m_Value(ShAmt0)), m_Value(ShAmt1))))
+    return nullptr;
+  // The shift opcodes must be identical. The inner shift matched by m_Shift
+  // may be a ConstantExpr, so use dyn_cast rather than cast to avoid
+  // asserting on non-instruction operands.
+  Instruction::BinaryOps ShiftOpcode = OuterShift->getOpcode();
+  auto *InnerShift = dyn_cast<BinaryOperator>(OuterShift->getOperand(0));
+  if (!InnerShift || ShiftOpcode != InnerShift->getOpcode())
+    return nullptr;
+  // Can we fold (ShAmt0+ShAmt1) ?
+  Value *NewShAmt = SimplifyBinOp(Instruction::BinaryOps::Add, ShAmt0, ShAmt1,
+                                  SQ.getWithInstruction(OuterShift));
+  if (!NewShAmt)
+    return nullptr; // Did not simplify.
+  // Is the new shift amount smaller than the bit width?
+  // FIXME: could also rely on ConstantRange.
+  unsigned BitWidth = X->getType()->getScalarSizeInBits();
+  if (!match(NewShAmt, m_SpecificInt_ULT(APInt(BitWidth, BitWidth))))
+    return nullptr;
+  // All good, we can do this fold.
+  return BinaryOperator::Create(ShiftOpcode, X, NewShAmt);
+}
+
 Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
   assert(Op0->getType() == Op1->getType());
@@ -38,6 +69,10 @@
   if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
     return Res;
 
+  if (Instruction *NewShift =
+          reassociateShiftAmtsOfTwoSameDirectionShifts(&I, SQ))
+    return NewShift;
+
   // (C1 shift (A add C2)) -> (C1 shift C2) shift A)
   // iff A and C2 are both positive.
Value *A; Index: test/Transforms/InstCombine/shift-amount-reassociation.ll =================================================================== --- test/Transforms/InstCombine/shift-amount-reassociation.ll +++ test/Transforms/InstCombine/shift-amount-reassociation.ll @@ -11,10 +11,7 @@ define i32 @t0(i32 %x, i32 %y) { ; CHECK-LABEL: @t0( -; CHECK-NEXT: [[T0:%.*]] = sub i32 32, [[Y:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr i32 [[X:%.*]], [[T0]] -; CHECK-NEXT: [[T2:%.*]] = add i32 [[Y]], -2 -; CHECK-NEXT: [[T3:%.*]] = lshr i32 [[T1]], [[T2]] +; CHECK-NEXT: [[T3:%.*]] = lshr i32 [[X:%.*]], 30 ; CHECK-NEXT: ret i32 [[T3]] ; %t0 = sub i32 32, %y @@ -26,10 +23,7 @@ define <2 x i32> @t1_vec_splat(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @t1_vec_splat( -; CHECK-NEXT: [[T0:%.*]] = sub <2 x i32> , [[Y:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr <2 x i32> [[X:%.*]], [[T0]] -; CHECK-NEXT: [[T2:%.*]] = add <2 x i32> [[Y]], -; CHECK-NEXT: [[T3:%.*]] = lshr <2 x i32> [[T1]], [[T2]] +; CHECK-NEXT: [[T3:%.*]] = lshr <2 x i32> [[X:%.*]], ; CHECK-NEXT: ret <2 x i32> [[T3]] ; %t0 = sub <2 x i32> , %y @@ -41,10 +35,7 @@ define <2 x i32> @t2_vec_nonsplat(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @t2_vec_nonsplat( -; CHECK-NEXT: [[T0:%.*]] = sub <2 x i32> , [[Y:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr <2 x i32> [[X:%.*]], [[T0]] -; CHECK-NEXT: [[T2:%.*]] = add <2 x i32> [[Y]], -; CHECK-NEXT: [[T3:%.*]] = lshr <2 x i32> [[T1]], [[T2]] +; CHECK-NEXT: [[T3:%.*]] = lshr <2 x i32> [[X:%.*]], ; CHECK-NEXT: ret <2 x i32> [[T3]] ; %t0 = sub <2 x i32> , %y @@ -58,10 +49,7 @@ define <3 x i32> @t3_vec_nonsplat_undef0(<3 x i32> %x, <3 x i32> %y) { ; CHECK-LABEL: @t3_vec_nonsplat_undef0( -; CHECK-NEXT: [[T0:%.*]] = sub <3 x i32> , [[Y:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i32> [[X:%.*]], [[T0]] -; CHECK-NEXT: [[T2:%.*]] = add <3 x i32> [[Y]], -; CHECK-NEXT: [[T3:%.*]] = lshr <3 x i32> [[T1]], [[T2]] +; CHECK-NEXT: [[T3:%.*]] = lshr <3 x i32> [[X:%.*]], ; CHECK-NEXT: ret <3 x i32> [[T3]] ; %t0 = sub 
<3 x i32> , %y @@ -73,10 +61,7 @@ define <3 x i32> @t4_vec_nonsplat_undef1(<3 x i32> %x, <3 x i32> %y) { ; CHECK-LABEL: @t4_vec_nonsplat_undef1( -; CHECK-NEXT: [[T0:%.*]] = sub <3 x i32> , [[Y:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i32> [[X:%.*]], [[T0]] -; CHECK-NEXT: [[T2:%.*]] = add <3 x i32> [[Y]], -; CHECK-NEXT: [[T3:%.*]] = lshr <3 x i32> [[T1]], [[T2]] +; CHECK-NEXT: [[T3:%.*]] = lshr <3 x i32> [[X:%.*]], ; CHECK-NEXT: ret <3 x i32> [[T3]] ; %t0 = sub <3 x i32> , %y @@ -88,10 +73,7 @@ define <3 x i32> @t5_vec_nonsplat_undef1(<3 x i32> %x, <3 x i32> %y) { ; CHECK-LABEL: @t5_vec_nonsplat_undef1( -; CHECK-NEXT: [[T0:%.*]] = sub <3 x i32> , [[Y:%.*]] -; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i32> [[X:%.*]], [[T0]] -; CHECK-NEXT: [[T2:%.*]] = add <3 x i32> [[Y]], -; CHECK-NEXT: [[T3:%.*]] = lshr <3 x i32> [[T1]], [[T2]] +; CHECK-NEXT: [[T3:%.*]] = lshr <3 x i32> [[X:%.*]], ; CHECK-NEXT: ret <3 x i32> [[T3]] ; %t0 = sub <3 x i32> , %y @@ -104,10 +86,7 @@ ; Some other shift opcodes define i32 @t6_shl(i32 %x, i32 %y) { ; CHECK-LABEL: @t6_shl( -; CHECK-NEXT: [[T0:%.*]] = sub i32 32, [[Y:%.*]] -; CHECK-NEXT: [[T1:%.*]] = shl i32 [[X:%.*]], [[T0]] -; CHECK-NEXT: [[T2:%.*]] = add i32 [[Y]], -2 -; CHECK-NEXT: [[T3:%.*]] = shl i32 [[T1]], [[T2]] +; CHECK-NEXT: [[T3:%.*]] = shl i32 [[X:%.*]], 30 ; CHECK-NEXT: ret i32 [[T3]] ; %t0 = sub i32 32, %y @@ -118,10 +97,7 @@ } define i32 @t7_ashr(i32 %x, i32 %y) { ; CHECK-LABEL: @t7_ashr( -; CHECK-NEXT: [[T0:%.*]] = sub i32 32, [[Y:%.*]] -; CHECK-NEXT: [[T1:%.*]] = ashr i32 [[X:%.*]], [[T0]] -; CHECK-NEXT: [[T2:%.*]] = add i32 [[Y]], -2 -; CHECK-NEXT: [[T3:%.*]] = ashr i32 [[T1]], [[T2]] +; CHECK-NEXT: [[T3:%.*]] = ashr i32 [[X:%.*]], 30 ; CHECK-NEXT: ret i32 [[T3]] ; %t0 = sub i32 32, %y