diff --git a/llvm/test/Transforms/InstCombine/icmp-mul.ll b/llvm/test/Transforms/InstCombine/icmp-mul.ll
--- a/llvm/test/Transforms/InstCombine/icmp-mul.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-mul.ll
@@ -1,7 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -passes=instcombine -S | FileCheck %s
 
+declare void @llvm.assume(i1)
 declare void @use(i8)
+declare void @usev2xi8(<2 x i8>)
+
 
 define i1 @squared_nsw_eq0(i5 %x) {
 ; CHECK-LABEL: @squared_nsw_eq0(
@@ -956,3 +959,230 @@
   %r = icmp eq i128 %s, 0
   ret i1 %r
 }
+
+define i1 @mul_oddC_overflow_eq(i8 %v) {
+; CHECK-LABEL: @mul_oddC_overflow_eq(
+; CHECK-NEXT:    [[MUL:%.*]] = mul i8 [[V:%.*]], 5
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[MUL]], 101
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %mul = mul i8 %v, 5
+  %cmp = icmp eq i8 %mul, 101
+  ret i1 %cmp
+}
+
+define i1 @mul_oddC_eq_nomod(i8 %v) {
+; CHECK-LABEL: @mul_oddC_eq_nomod(
+; CHECK-NEXT:    [[MUL:%.*]] = mul i8 [[V:%.*]], 3
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[MUL]], 34
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %mul = mul i8 %v, 3
+  %cmp = icmp eq i8 %mul, 34
+  ret i1 %cmp
+}
+
+define i1 @mul_evenC_ne(i8 %v) {
+; CHECK-LABEL: @mul_evenC_ne(
+; CHECK-NEXT:    [[MUL:%.*]] = mul i8 [[V:%.*]], 6
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[MUL]], 36
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %mul = mul i8 %v, 6
+  %cmp = icmp ne i8 %mul, 36
+  ret i1 %cmp
+}
+
+define <2 x i1> @mul_oddC_ne_vec(<2 x i8> %v) {
+; CHECK-LABEL: @mul_oddC_ne_vec(
+; CHECK-NEXT:    [[MUL:%.*]] = mul <2 x i8> [[V:%.*]],
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i8> [[MUL]],
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %mul = mul <2 x i8> %v,
+  %cmp = icmp ne <2 x i8> %mul,
+  ret <2 x i1> %cmp
+}
+
+define <2 x i1> @mul_oddC_ne_nosplat_vec(<2 x i8> %v) {
+; CHECK-LABEL: @mul_oddC_ne_nosplat_vec(
+; CHECK-NEXT:    [[MUL:%.*]] = mul <2 x i8> [[V:%.*]],
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i8> [[MUL]],
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %mul = mul <2 x i8> %v,
+  %cmp = icmp ne <2 x i8> %mul,
+  ret <2 x i1> %cmp
+}
+
+define i1 @mul_nsuw_xy_z_maybe_zero_eq(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @mul_nsuw_xy_z_maybe_zero_eq(
+; CHECK-NEXT:    [[MULX:%.*]] = mul nuw nsw i8 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    [[MULY:%.*]] = mul nuw nsw i8 [[Y:%.*]], [[Z]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[MULX]], [[MULY]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %mulx = mul nsw nuw i8 %x, %z
+  %muly = mul nsw nuw i8 %y, %z
+  %cmp = icmp eq i8 %mulx, %muly
+  ret i1 %cmp
+}
+
+define i1 @mul_xy_z_assumenozero_ne(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @mul_xy_z_assumenozero_ne(
+; CHECK-NEXT:    [[NZ:%.*]] = icmp ne i8 [[Z:%.*]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NZ]])
+; CHECK-NEXT:    [[MULX:%.*]] = mul i8 [[X:%.*]], [[Z]]
+; CHECK-NEXT:    [[MULY:%.*]] = mul i8 [[Y:%.*]], [[Z]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i8 [[MULY]], [[MULX]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %nz = icmp ne i8 %z, 0
+  call void @llvm.assume(i1 %nz)
+  %mulx = mul i8 %x, %z
+  %muly = mul i8 %y, %z
+  %cmp = icmp ne i8 %muly, %mulx
+  ret i1 %cmp
+}
+
+define i1 @mul_xy_z_assumeodd_eq(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @mul_xy_z_assumeodd_eq(
+; CHECK-NEXT:    [[LB:%.*]] = and i8 [[Z:%.*]], 1
+; CHECK-NEXT:    [[NZ:%.*]] = icmp ne i8 [[LB]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NZ]])
+; CHECK-NEXT:    [[MULX:%.*]] = mul i8 [[X:%.*]], [[Z]]
+; CHECK-NEXT:    [[MULY:%.*]] = mul i8 [[Z]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8 [[MULX]], [[MULY]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %lb = and i8 %z, 1
+  %nz = icmp ne i8 %lb, 0
+  call void @llvm.assume(i1 %nz)
+  %mulx = mul i8 %x, %z
+  %muly = mul i8 %z, %y
+  %cmp = icmp eq i8 %mulx, %muly
+  ret i1 %cmp
+}
+
+define <2 x i1> @reused_mul_nsw_xy_z_setnonzero_vec_ne(<2 x i8> %x, <2 x i8> %y, <2 x i8> %zi) {
+; CHECK-LABEL: @reused_mul_nsw_xy_z_setnonzero_vec_ne(
+; CHECK-NEXT:    [[Z:%.*]] = or <2 x i8> [[ZI:%.*]],
+; CHECK-NEXT:    [[MULX:%.*]] = mul nsw <2 x i8> [[Z]], [[X:%.*]]
+; CHECK-NEXT:    [[MULY:%.*]] = mul nsw <2 x i8> [[Z]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i8> [[MULY]], [[MULX]]
+; CHECK-NEXT:    call void @usev2xi8(<2 x i8> [[MULY]])
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %z = or <2 x i8> %zi,
+  %mulx = mul nsw <2 x i8> %z, %x
+  %muly = mul nsw <2 x i8> %y, %z
+  %cmp = icmp ne <2 x i8> %muly, %mulx
+  call void @usev2xi8(<2 x i8> %muly)
+  ret <2 x i1> %cmp
+}
+
+define i1 @mul_mixed_nuw_nsw_xy_z_setodd_ult(i8 %x, i8 %y, i8 %zi) {
+; CHECK-LABEL: @mul_mixed_nuw_nsw_xy_z_setodd_ult(
+; CHECK-NEXT:    [[Z:%.*]] = or i8 [[ZI:%.*]], 1
+; CHECK-NEXT:    [[MULX:%.*]] = mul nsw i8 [[Z]], [[X:%.*]]
+; CHECK-NEXT:    [[MULY:%.*]] = mul nuw nsw i8 [[Z]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i8 [[MULX]], [[MULY]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %z = or i8 %zi, 1
+  %mulx = mul nsw i8 %x, %z
+  %muly = mul nuw nsw i8 %y, %z
+  %cmp = icmp ult i8 %mulx, %muly
+  ret i1 %cmp
+}
+
+define i1 @mul_nuw_xy_z_assumenonzero_uge(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @mul_nuw_xy_z_assumenonzero_uge(
+; CHECK-NEXT:    [[NZ:%.*]] = icmp ne i8 [[Z:%.*]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[NZ]])
+; CHECK-NEXT:    [[MULX:%.*]] = mul nuw i8 [[X:%.*]], [[Z]]
+; CHECK-NEXT:    [[MULY:%.*]] = mul nuw i8 [[Y:%.*]], [[Z]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp uge i8 [[MULY]], [[MULX]]
+; CHECK-NEXT:    call void @use(i8 [[MULX]])
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+  %nz = icmp ne i8 %z, 0
+  call void @llvm.assume(i1 %nz)
+  %mulx = mul nuw i8 %x, %z
+  %muly = mul nuw i8 %y, %z
+  %cmp = icmp uge i8 %muly, %mulx
+  call void @use(i8 %mulx)
+  ret i1 %cmp
+}
+
+define <2 x i1> @mul_nuw_xy_z_setnonzero_vec_eq(<2 x i8> %x, <2 x i8> %y, <2 x i8> %zi) {
+; CHECK-LABEL: @mul_nuw_xy_z_setnonzero_vec_eq(
+; CHECK-NEXT:    [[Z:%.*]] = or <2 x i8> [[ZI:%.*]],
+; CHECK-NEXT:    [[MULX:%.*]] = mul nuw <2 x i8> [[Z]], [[X:%.*]]
+; CHECK-NEXT:    [[MULY:%.*]] = mul nuw <2 x i8> [[Z]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i8> [[MULX]], [[MULY]]
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %z = or <2 x i8> %zi,
+  %mulx = mul nuw <2 x i8> %z, %x
+  %muly = mul nuw <2 x i8> %z, %y
+  %cmp = icmp eq <2 x i8> %mulx, %muly
+  ret <2 x i1> %cmp
+}
+
+define i1 @mul_nuw_xy_z_brnonzero_ult(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @mul_nuw_xy_z_brnonzero_ult(
+; CHECK-NEXT:    [[NZ_NOT:%.*]] = icmp eq i8 [[Z:%.*]], 0
+; CHECK-NEXT:    br i1 [[NZ_NOT]], label [[FALSE:%.*]], label [[TRUE:%.*]]
+; CHECK:       true:
+; CHECK-NEXT:    [[MULX:%.*]] = mul nuw i8 [[X:%.*]], [[Z]]
+; CHECK-NEXT:    [[MULY:%.*]] = mul nuw i8 [[Y:%.*]], [[Z]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i8 [[MULY]], [[MULX]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+; CHECK:       false:
+; CHECK-NEXT:    call void @use(i8 [[Z]])
+; CHECK-NEXT:    ret i1 true
+;
+  %nz = icmp ne i8 %z, 0
+  br i1 %nz, label %true, label %false
+true:
+  %mulx = mul nuw i8 %x, %z
+  %muly = mul nuw i8 %y, %z
+  %cmp = icmp ult i8 %muly, %mulx
+  ret i1 %cmp
+false:
+  call void @use(i8 %z)
+  ret i1 true
+}
+
+define i1 @reused_mul_nuw_xy_z_selectnonzero_ugt(i8 %x, i8 %y, i8 %z) {
+; CHECK-LABEL: @reused_mul_nuw_xy_z_selectnonzero_ugt(
+; CHECK-NEXT:    [[NZ_NOT:%.*]] = icmp eq i8 [[Z:%.*]], 0
+; CHECK-NEXT:    [[MULX:%.*]] = mul nuw i8 [[X:%.*]], [[Z]]
+; CHECK-NEXT:    [[MULY:%.*]] = mul nuw i8 [[Y:%.*]], [[Z]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 [[MULY]], [[MULX]]
+; CHECK-NEXT:    [[R:%.*]] = select i1 [[NZ_NOT]], i1 true, i1 [[CMP]]
+; CHECK-NEXT:    ret i1 [[R]]
+;
+  %nz = icmp ne i8 %z, 0
+  %mulx = mul nuw i8 %x, %z
+  %muly = mul nuw i8 %y, %z
+  %cmp = icmp ugt i8 %muly, %mulx
+  %r = select i1 %nz, i1 %cmp, i1 true
+  ret i1 %r
+}
+
+define <2 x i1> @mul_mixed_nsw_nuw_xy_z_setnonzero_vec_ule(<2 x i8> %x, <2 x i8> %y, <2 x i8> %zi) {
+; CHECK-LABEL: @mul_mixed_nsw_nuw_xy_z_setnonzero_vec_ule(
+; CHECK-NEXT:    [[Z:%.*]] = or <2 x i8> [[ZI:%.*]],
+; CHECK-NEXT:    [[MULX:%.*]] = mul nuw <2 x i8> [[Z]], [[X:%.*]]
+; CHECK-NEXT:    [[MULY:%.*]] = mul nsw <2 x i8> [[Z]], [[Y:%.*]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ule <2 x i8> [[MULY]], [[MULX]]
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %z = or <2 x i8> %zi,
+  %mulx = mul nuw <2 x i8> %x, %z
+  %muly = mul nsw <2 x i8> %z, %y
+  %cmp = icmp ule <2 x i8> %muly, %mulx
+  ret <2 x i1> %cmp
+}