Index: llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
===================================================================
--- llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -932,6 +932,9 @@
   if (Instruction *V = dropRedundantMaskingOfLeftShiftInput(&I, Q, Builder))
     return V;
 
+  if (Value *V = SimplifySelectsFeedingBinaryOp(I, I.getOperand(0), I.getOperand(1)))
+    return replaceInstUsesWith(I, V);
+
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
   Type *Ty = I.getType();
   unsigned BitWidth = Ty->getScalarSizeInBits();
@@ -1049,6 +1052,9 @@
   if (Instruction *R = commonShiftTransforms(I))
     return R;
 
+  if (Value *V = SimplifySelectsFeedingBinaryOp(I, I.getOperand(0), I.getOperand(1)))
+    return replaceInstUsesWith(I, V);
+
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
   Type *Ty = I.getType();
   const APInt *ShAmtAPInt;
@@ -1247,6 +1253,9 @@
   if (Instruction *R = commonShiftTransforms(I))
     return R;
 
+  if (Value *V = SimplifySelectsFeedingBinaryOp(I, I.getOperand(0), I.getOperand(1)))
+    return replaceInstUsesWith(I, V);
+
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
   Type *Ty = I.getType();
   unsigned BitWidth = Ty->getScalarSizeInBits();
Index: llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
===================================================================
--- llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -853,6 +853,30 @@
     Cond = D;
     True = SimplifyBinOp(Opcode, LHS, E, FMF, Q);
     False = SimplifyBinOp(Opcode, LHS, F, FMF, Q);
+  } else if (LHSIsSelect) {
+    // (A ? B : C) op Y -> A ? (B op Y) : (C op Y)
+    // B, C, Y constants
+    if (isa<Constant>(B)) {
+      if (isa<Constant>(C)) {
+        if (isa<Constant>(RHS)) {
+          Cond = A;
+          True = SimplifyBinOp(Opcode, B, RHS, FMF, Q);
+          False = SimplifyBinOp(Opcode, C, RHS, FMF, Q);
+        }
+      }
+    }
+  } else if (RHSIsSelect) {
+    // X op (D ? E : F) -> D ? (X op E) : (X op F)
+    // E, F, X constants
+    if (isa<Constant>(E)) {
+      if (isa<Constant>(F)) {
+        if (isa<Constant>(LHS)) {
+          Cond = D;
+          True = SimplifyBinOp(Opcode, LHS, E, FMF, Q);
+          False = SimplifyBinOp(Opcode, LHS, F, FMF, Q);
+        }
+      }
+    }
   }
 
   if (!True || !False)
Index: llvm/test/Transforms/InstCombine/lshr-select-const.ll
===================================================================
--- /dev/null
+++ llvm/test/Transforms/InstCombine/lshr-select-const.ll
@@ -0,0 +1,23 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; InstCombine should be able to eliminate the 'lshr' because the 'select'
+; feeding it has constant operands and the shift amount is constant, so the
+; whole chain below folds down to a single select of constants.
+
+define i64 @fold_lshr(i1 %arg) {
+; CHECK-LABEL: @fold_lshr(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[V8:%.*]] = select i1 [[ARG:%.*]], i64 5369111361, i64 5369111591
+; CHECK-NEXT:    ret i64 [[V8]]
+entry:
+  %v0 = select i1 %arg, i64 64, i64 0
+  %v1 = lshr exact i64 %v0, 6
+  %v2 = xor i64 %v1, 1
+  %v3 = shl nuw i64 %v0, 57
+  %v4 = ashr exact i64 %v3, 63
+  %v5 = sub nsw i64 0, %v2
+  %v6 = and i64 %v5, 5369111591
+  %v7 = and i64 %v4, 5369111361
+  %v8 = add nuw nsw i64 %v7, %v6
+  ret i64 %v8
+}
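
Illustrative note (not part of the patch): a reduced sketch of the fold the new
LHSIsSelect/RHSIsSelect branches are meant to enable, assuming one operand of the
binary operator is a select of constants and the other operand is a constant.
The function name @sketch is hypothetical and used only for illustration:

  define i64 @sketch(i1 %c) {
    %s = select i1 %c, i64 64, i64 0   ; select of constants
    %r = lshr exact i64 %s, 6          ; (c ? 64 : 0) >> 6
    ret i64 %r
  }

With the patched SimplifySelectsFeedingBinaryOp, the shift is expected to fold
into the select, leaving:

  define i64 @sketch(i1 %c) {
    %r = select i1 %c, i64 1, i64 0
    ret i64 %r
  }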