Index: llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
===================================================================
--- llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -3228,12 +3228,22 @@
   case Intrinsic::fshl:
   case Intrinsic::fshr:
-    // (rot X, ?) == 0/-1 --> X == 0/-1
-    // TODO: This transform is safe to re-use undef elts in a vector, but
-    //       the constant value passed in by the caller doesn't allow that.
-    if (C.isNullValue() || C.isAllOnesValue())
-      if (II->getArgOperand(0) == II->getArgOperand(1))
+    if (II->getArgOperand(0) == II->getArgOperand(1)) {
+      // (rot X, ?) == 0/-1 --> X == 0/-1
+      // TODO: This transform is safe to re-use undef elts in a vector, but
+      //       the constant value passed in by the caller doesn't allow that.
+      if (C.isNullValue() || C.isAllOnesValue())
         return new ICmpInst(Pred, II->getArgOperand(0), Cmp.getOperand(1));
+
+      const APInt *RotAmtC;
+      // ror(X, RotAmtC) == C --> X == rol(C, RotAmtC)
+      // rol(X, RotAmtC) == C --> X == ror(C, RotAmtC)
+      if (match(II->getArgOperand(2), m_APInt(RotAmtC)))
+        return new ICmpInst(Pred, II->getArgOperand(0),
+                            II->getIntrinsicID() == Intrinsic::fshl
+                                ? ConstantInt::get(Ty, C.rotr(*RotAmtC))
+                                : ConstantInt::get(Ty, C.rotl(*RotAmtC)));
+    }
     break;
   case Intrinsic::uadd_sat: {
Index: llvm/test/Transforms/InstCombine/icmp-rotate.ll
===================================================================
--- llvm/test/Transforms/InstCombine/icmp-rotate.ll
+++ llvm/test/Transforms/InstCombine/icmp-rotate.ll
@@ -108,8 +108,7 @@
 
 define i1 @rol_eq_cst(i8 %x) {
 ; CHECK-LABEL: @rol_eq_cst(
-; CHECK-NEXT:    [[F:%.*]] = tail call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 3)
-; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[F]], 2
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[X:%.*]], 64
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %f = tail call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 3)
@@ -119,8 +118,7 @@
 
 define i1 @rol_ne_cst(i8 %x) {
 ; CHECK-LABEL: @rol_ne_cst(
-; CHECK-NEXT:    [[F:%.*]] = tail call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 3)
-; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[F]], 2
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[X:%.*]], 64
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %f = tail call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 3)
@@ -132,7 +130,7 @@
 ; CHECK-LABEL: @rol_eq_cst_use(
 ; CHECK-NEXT:    [[F:%.*]] = tail call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 3)
 ; CHECK-NEXT:    call void @use(i8 [[F]])
-; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[F]], 2
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[X]], 64
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %f = tail call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 3)
@@ -143,8 +141,7 @@
 
 define i1 @ror_eq_cst(i8 %x) {
 ; CHECK-LABEL: @ror_eq_cst(
-; CHECK-NEXT:    [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 6)
-; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[F]], 3
+; CHECK-NEXT:    [[R:%.*]] = icmp eq i8 [[X:%.*]], 12
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %f = tail call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 2)
@@ -154,8 +151,7 @@
 
 define i1 @ror_ne_cst(i8 %x) {
 ; CHECK-LABEL: @ror_ne_cst(
-; CHECK-NEXT:    [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 6)
-; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[F]], 3
+; CHECK-NEXT:    [[R:%.*]] = icmp ne i8 [[X:%.*]], 12
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
   %f = tail call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 2)
@@ -165,8 +161,7 @@
 
 define <2 x i1> @rol_eq_cst_vec(<2 x i5> %x) {
 ; CHECK-LABEL: @rol_eq_cst_vec(
-; CHECK-NEXT:    [[F:%.*]] = tail call <2 x i5> @llvm.fshl.v2i5(<2 x i5> [[X:%.*]], <2 x i5> [[X]], <2 x i5> )
-; CHECK-NEXT:    [[R:%.*]] = icmp eq <2 x i5> [[F]],
+; CHECK-NEXT:    [[R:%.*]] = icmp eq <2 x i5> [[X:%.*]],
 ; CHECK-NEXT:    ret <2 x i1> [[R]]
 ;
   %f = tail call <2 x i5> @llvm.fshl.v2i5(<2 x i5> %x, <2 x i5> %x, <2 x i5> )