diff --git a/llvm/test/Transforms/InstCombine/icmp-binop.ll b/llvm/test/Transforms/InstCombine/icmp-binop.ll
new file
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/icmp-binop.ll
@@ -0,0 +1,264 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+declare void @use64(i64)
+declare void @llvm.assume(i1)
+
+define i1 @mul_unkV_oddC_eq(i32 %v) {
+; CHECK-LABEL: @mul_unkV_oddC_eq(
+; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[V:%.*]], 3
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[MUL]], 0
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %mul = mul i32 %v, 3
+  %cmp = icmp eq i32 %mul, 0
+  ret i1 %cmp
+}
+
+define i1 @mul_unkV_oddC_eq_nonzero(i32 %v) {
+; CHECK-LABEL: @mul_unkV_oddC_eq_nonzero(
+; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[V:%.*]], 3
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[MUL]], 4
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %mul = mul i32 %v, 3
+  %cmp = icmp eq i32 %mul, 4
+  ret i1 %cmp
+}
+
+define <2 x i1> @mul_unkV_oddC_ne_vec(<2 x i64> %v) {
+; CHECK-LABEL: @mul_unkV_oddC_ne_vec(
+; CHECK-NEXT:    [[MUL:%.*]] = mul <2 x i64> [[V:%.*]], <i64 3, i64 3>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i64> [[MUL]], zeroinitializer
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %mul = mul <2 x i64> %v, <i64 3, i64 3>
+  %cmp = icmp ne <2 x i64> %mul, <i64 0, i64 0>
+  ret <2 x i1> %cmp
+}
+
+define i1 @mul_assumeoddV_asumeoddV_eq(i16 %v, i16 %v2) {
+; CHECK-LABEL: @mul_assumeoddV_asumeoddV_eq(
+; CHECK-NEXT:    [[LB:%.*]] = and i16 [[V:%.*]], 1
+; CHECK-NEXT:    [[ODD:%.*]] = icmp ne i16 [[LB]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[ODD]])
+; CHECK-NEXT:    [[LB2:%.*]] = and i16 [[V2:%.*]], 1
+; CHECK-NEXT:    [[ODD2:%.*]] = icmp ne i16 [[LB2]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[ODD2]])
+; CHECK-NEXT:    ret i1 true
+;
+  %lb = and i16 %v, 1
+  %odd = icmp ne i16 %lb, 0
+  call void @llvm.assume(i1 %odd)
+  %lb2 = and i16 %v2, 1
+  %odd2 = icmp ne i16 %lb2, 0
+  call void @llvm.assume(i1 %odd2)
+  %mul = mul i16 %v, %v2
+  %cmp = icmp ne i16 %mul, 0
+  ret i1 %cmp
+}
+
+define i1 @mul_unkV_oddC_sge(i8 %v) {
+; CHECK-LABEL: @mul_unkV_oddC_sge(
+; CHECK-NEXT:    [[MUL:%.*]] = mul i8 [[V:%.*]], 3
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i8 [[MUL]], -1
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %mul = mul i8 %v, 3
+  %cmp = icmp sge i8 %mul, 0
+  ret i1 %cmp
+}
+
+define i1 @mul_reused_unkV_oddC_ne(i64 %v) {
+; CHECK-LABEL: @mul_reused_unkV_oddC_ne(
+; CHECK-NEXT:    [[MUL:%.*]] = mul i64 [[V:%.*]], 3
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i64 [[MUL]], 0
+; CHECK-NEXT:    call void @use64(i64 [[MUL]])
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %mul = mul i64 %v, 3
+  %cmp = icmp ne i64 %mul, 0
+  call void @use64(i64 %mul)
+  ret i1 %cmp
+}
+
+define i1 @mul_assumeoddV_unkV_eq(i16 %v, i16 %v2) {
+; CHECK-LABEL: @mul_assumeoddV_unkV_eq(
+; CHECK-NEXT:    [[LB:%.*]] = and i16 [[V2:%.*]], 1
+; CHECK-NEXT:    [[ODD:%.*]] = icmp ne i16 [[LB]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[ODD]])
+; CHECK-NEXT:    [[MUL:%.*]] = mul i16 [[V:%.*]], [[V2]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i16 [[MUL]], 0
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %lb = and i16 %v2, 1
+  %odd = icmp eq i16 %lb, 1
+  call void @llvm.assume(i1 %odd)
+  %mul = mul i16 %v, %v2
+  %cmp = icmp eq i16 %mul, 0
+  ret i1 %cmp
+}
+
+define i1 @mul_reusedassumeoddV_unkV_ne(i64 %v, i64 %v2) {
+; CHECK-LABEL: @mul_reusedassumeoddV_unkV_ne(
+; CHECK-NEXT:    [[LB:%.*]] = and i64 [[V:%.*]], 1
+; CHECK-NEXT:    [[ODD:%.*]] = icmp ne i64 [[LB]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[ODD]])
+; CHECK-NEXT:    [[MUL:%.*]] = mul i64 [[V]], [[V2:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i64 [[MUL]], 0
+; CHECK-NEXT:    call void @use64(i64 [[MUL]])
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %lb = and i64 %v, 1
+  %odd = icmp ne i64 %lb, 0
+  call void @llvm.assume(i1 %odd)
+  %mul = mul i64 %v, %v2
+  %cmp = icmp ne i64 %mul, 0
+  call void @use64(i64 %mul)
+  ret i1 %cmp
+}
+
+define <2 x i1> @mul_setoddV_unkV_ne(<2 x i32> %v1, <2 x i32> %v2) {
+; CHECK-LABEL: @mul_setoddV_unkV_ne(
+; CHECK-NEXT:    [[V:%.*]] = or <2 x i32> [[V1:%.*]], <i32 1, i32 1>
+; CHECK-NEXT:    [[MUL:%.*]] = mul <2 x i32> [[V]], [[V2:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> [[MUL]], zeroinitializer
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %v = or <2 x i32> %v1, <i32 1, i32 1>
+  %mul = mul <2 x i32> %v, %v2
+  %cmp = icmp ne <2 x i32> %mul, <i32 0, i32 0>
+  ret <2 x i1> %cmp
+}
+
+define i1 @mul_broddV_unkV_eq(i16 %v, i16 %v2) {
+; CHECK-LABEL: @mul_broddV_unkV_eq(
+; CHECK-NEXT:    [[LB:%.*]] = and i16 [[V2:%.*]], 1
+; CHECK-NEXT:    [[ODD_NOT:%.*]] = icmp eq i16 [[LB]], 0
+; CHECK-NEXT:    br i1 [[ODD_NOT]], label [[FALSE:%.*]], label [[TRUE:%.*]]
+; CHECK:       true:
+; CHECK-NEXT:    [[MUL:%.*]] = mul i16 [[V:%.*]], [[V2]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i16 [[MUL]], 0
+; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK:       false:
+; CHECK-NEXT:    call void @use64(i16 [[V]])
+; CHECK-NEXT:    ret i1 false
+;
+  %lb = and i16 %v2, 1
+  %odd = icmp eq i16 %lb, 1
+  br i1 %odd, label %true, label %false
+true:
+  %mul = mul i16 %v, %v2
+  %cmp = icmp eq i16 %mul, 0
+  ret i1 %cmp
+false:
+  call void @use64(i16 %v)
+  ret i1 false
+}
+
+define i1 @mul_unkV_evenC_ne(i64 %v) {
+; CHECK-LABEL: @mul_unkV_evenC_ne(
+; CHECK-NEXT:    [[MUL_MASK:%.*]] = and i64 [[V:%.*]], 4611686018427387903
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i64 [[MUL_MASK]], 0
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %mul = mul i64 %v, 4
+  %cmp = icmp ne i64 %mul, 0
+  ret i1 %cmp
+}
+
+define i1 @mul_assumenzV_asumenzV_eq(i64 %v, i64 %v2) {
+; CHECK-LABEL: @mul_assumenzV_asumenzV_eq(
+; CHECK-NEXT:    [[NZ:%.*]] = icmp ne i64 [[V:%.*]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NZ]])
+; CHECK-NEXT:    [[NZ2:%.*]] = icmp ne i64 [[V2:%.*]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NZ2]])
+; CHECK-NEXT:    [[MUL:%.*]] = mul i64 [[V]], [[V2]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64 [[MUL]], 0
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %nz = icmp ne i64 %v, 0
+  call void @llvm.assume(i1 %nz)
+  %nz2 = icmp ne i64 %v2, 0
+  call void @llvm.assume(i1 %nz2)
+  %mul = mul i64 %v, %v2
+  %cmp = icmp eq i64 %mul, 0
+  ret i1 %cmp
+}
+
+define i1 @mul_assumenzV_unkV_nsw_ne(i32 %v, i32 %v2) {
+; CHECK-LABEL: @mul_assumenzV_unkV_nsw_ne(
+; CHECK-NEXT:    [[NZ:%.*]] = icmp ne i32 [[V:%.*]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NZ]])
+; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[V]], [[V2:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[MUL]], 0
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %nz = icmp ne i32 %v, 0
+  call void @llvm.assume(i1 %nz)
+  %mul = mul nsw i32 %v, %v2
+  %cmp = icmp ne i32 %mul, 0
+  ret i1 %cmp
+}
+
+define i1 @mul_selectnzV_unkV_nsw_ne(i8 %v, i8 %v2) {
+; CHECK-LABEL: @mul_selectnzV_unkV_nsw_ne(
+; CHECK-NEXT:    [[NZ:%.*]] = icmp ne i8 [[V:%.*]], 0
+; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i8 [[V]], [[V2:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[MUL]], 0
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[NZ]], i1 [[CMP]], i1 false
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = icmp ne i8 %v, 0
+  %mul = mul nsw i8 %v, %v2
+  %cmp = icmp ne i8 %mul, 0
+  %r = select i1 %nz, i1 %cmp, i1 false
+  ret i1 %r
+}
+
+define <2 x i1> @mul_unkV_unkV_nsw_nuw_ne(<2 x i16> %v, <2 x i16> %v2) {
+; CHECK-LABEL: @mul_unkV_unkV_nsw_nuw_ne(
+; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw <2 x i16> [[V:%.*]], [[V2:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i16> [[MUL]], zeroinitializer
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %mul = mul nuw nsw <2 x i16> %v, %v2
+  %cmp = icmp ne <2 x i16> %mul, <i16 0, i16 0>
+  ret <2 x i1> %cmp
+}
+
+define i1 @mul_setnzV_unkV_nuw_eq(i8 %v1, i8 %v2) {
+; CHECK-LABEL: @mul_setnzV_unkV_nuw_eq(
+; CHECK-NEXT:    [[V:%.*]] = or i8 [[V1:%.*]], 2
+; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i8 [[V]], [[V2:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[MUL]], 0
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %v = or i8 %v1, 2
+  %mul = mul nuw i8 %v, %v2
+  %cmp = icmp eq i8 %mul, 0
+  ret i1 %cmp
+}
+
+define i1 @mul_brnzV_unkV_nuw_eq(i64 %v, i64 %v2) {
+; CHECK-LABEL: @mul_brnzV_unkV_nuw_eq(
+; CHECK-NEXT:    [[NZ_NOT:%.*]] = icmp eq i64 [[V2:%.*]], 0
+; CHECK-NEXT:    br i1 [[NZ_NOT]], label [[FALSE:%.*]], label [[TRUE:%.*]]
+; CHECK:       true:
+; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i64 [[V:%.*]], [[V2]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64 [[MUL]], 0
+; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK:       false:
+; CHECK-NEXT:    call void @use64(i64 [[V]])
+; CHECK-NEXT:    ret i1 false
+;
+  %nz = icmp ne i64 %v2, 0
+  br i1 %nz, label %true, label %false
+true:
+  %mul = mul nuw i64 %v, %v2
+  %cmp = icmp eq i64 %mul, 0
+  ret i1 %cmp
+false:
+  call void @use64(i64 %v)
+  ret i1 false
+}