diff --git a/llvm/test/Transforms/ConstraintElimination/type-bounds.ll b/llvm/test/Transforms/ConstraintElimination/type-bounds.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/ConstraintElimination/type-bounds.ll
@@ -0,0 +1,212 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s
+
+declare void @llvm.assume(i1)
+
+define i1 @zext_cmp_i1_to_i32(i1 %a, i32 %b) {
+; CHECK-LABEL: @zext_cmp_i1_to_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A_EXT:%.*]] = zext i1 [[A:%.*]] to i32
+; CHECK-NEXT:    [[CMP_1:%.*]] = icmp ugt i32 [[B:%.*]], 1
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_1]])
+; CHECK-NEXT:    [[CMP_2:%.*]] = icmp ult i32 [[A_EXT]], [[B]]
+; CHECK-NEXT:    ret i1 [[CMP_2]]
+;
+entry:
+  %a.ext = zext i1 %a to i32
+  %cmp.1 = icmp ugt i32 %b, 1
+  call void @llvm.assume(i1 %cmp.1)
+  %cmp.2 = icmp ult i32 %a.ext, %b
+  ret i1 %cmp.2
+}
+
+define i1 @zext_cmp_i1_to_i32_2(i1 %a, i32 %b) {
+; CHECK-LABEL: @zext_cmp_i1_to_i32_2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A_EXT:%.*]] = zext i1 [[A:%.*]] to i32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[A_EXT]], [[B:%.*]]
+; CHECK-NEXT:    ret i1 [[CMP]]
+;
+entry:
+  %a.ext = zext i1 %a to i32
+  %cmp = icmp ult i32 %a.ext, %b
+  ret i1 %cmp
+}
+
+define i1 @zext_cmp_i1_to_i32_signed(i1 %a, i32 %b) {
+; CHECK-LABEL: @zext_cmp_i1_to_i32_signed(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A_EXT:%.*]] = zext i1 [[A:%.*]] to i32
+; CHECK-NEXT:    [[CMP_1:%.*]] = icmp ugt i32 [[B:%.*]], 1
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_1]])
+; CHECK-NEXT:    [[CMP_2:%.*]] = icmp slt i32 [[A_EXT]], [[B]]
+; CHECK-NEXT:    ret i1 [[CMP_2]]
+;
+entry:
+  %a.ext = zext i1 %a to i32
+  %cmp.1 = icmp ugt i32 %b, 1
+  call void @llvm.assume(i1 %cmp.1)
+  %cmp.2 = icmp slt i32 %a.ext, %b
+  ret i1 %cmp.2
+}
+
+define i1 @zext_cmp_vector_bounds(<2 x i8> %a, <2 x i32> %b) {
+; CHECK-LABEL: @zext_cmp_vector_bounds(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A_EXT:%.*]] = zext <2 x i8> [[A:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[CMP_1:%.*]] = icmp ugt <2 x i32> [[B:%.*]], <i32 255, i32 255>
+; CHECK-NEXT:    [[CONT_1:%.*]] = extractelement <2 x i1> [[CMP_1]], i32 0
+; CHECK-NEXT:    [[CONT_2:%.*]] = extractelement <2 x i1> [[CMP_1]], i32 1
+; CHECK-NEXT:    [[AND:%.*]] = and i1 [[CONT_1]], [[CONT_2]]
+; CHECK-NEXT:    call void @llvm.assume(i1 [[AND]])
+; CHECK-NEXT:    [[CMP_2:%.*]] = icmp ult <2 x i32> [[A_EXT]], [[B]]
+; CHECK-NEXT:    [[CONT_3:%.*]] = extractelement <2 x i1> [[CMP_1]], i32 0
+; CHECK-NEXT:    [[CONT_4:%.*]] = extractelement <2 x i1> [[CMP_1]], i32 1
+; CHECK-NEXT:    [[AND_2:%.*]] = and i1 [[CONT_3]], [[CONT_4]]
+; CHECK-NEXT:    ret i1 [[AND_2]]
+;
+entry:
+  %a.ext = zext <2 x i8> %a to <2 x i32>
+  %cmp.1 = icmp ugt <2 x i32> %b, <i32 255, i32 255>
+  %cont.1 = extractelement <2 x i1> %cmp.1, i32 0
+  %cont.2 = extractelement <2 x i1> %cmp.1, i32 1
+  %and = and i1 %cont.1, %cont.2
+  call void @llvm.assume(i1 %and)
+  %cmp.2 = icmp ult <2 x i32> %a.ext, %b
+  %cont.3 = extractelement <2 x i1> %cmp.1, i32 0
+  %cont.4 = extractelement <2 x i1> %cmp.1, i32 1
+  %and.2 = and i1 %cont.3, %cont.4
+  ret i1 %and.2
+}
+
+define i1 @zext_cmp_i8_to_i32(i8 %a, i32 %b) {
+; CHECK-LABEL: @zext_cmp_i8_to_i32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A_EXT:%.*]] = zext i8 [[A:%.*]] to i32
+; CHECK-NEXT:    [[CMP_1:%.*]] = icmp ugt i32 [[B:%.*]], 255
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_1]])
+; CHECK-NEXT:    [[CMP_2:%.*]] = icmp ult i32 [[A_EXT]], [[B]]
+; CHECK-NEXT:    ret i1 [[CMP_2]]
+;
+entry:
+  %a.ext = zext i8 %a to i32
+  %cmp.1 = icmp ugt i32 %b, 255
+  call void @llvm.assume(i1 %cmp.1)
+  %cmp.2 = icmp ult i32 %a.ext, %b
+  ret i1 %cmp.2
+}
+
+define i1 @zext_cmp_i8_to_i32_less_than_255(i8 %a, i32 %b) {
+; CHECK-LABEL: @zext_cmp_i8_to_i32_less_than_255(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A_EXT:%.*]] = zext i8 [[A:%.*]] to i32
+; CHECK-NEXT:    [[CMP_1:%.*]] = icmp ugt i32 [[B:%.*]], 240
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_1]])
+; CHECK-NEXT:    [[CMP_2:%.*]] = icmp ult i32 [[A_EXT]], [[B]]
+; CHECK-NEXT:    ret i1 [[CMP_2]]
+;
+entry:
+  %a.ext = zext i8 %a to i32
+  %cmp.1 = icmp ugt i32 %b, 240
+  call void @llvm.assume(i1 %cmp.1)
+  %cmp.2 = icmp ult i32 %a.ext, %b
+  ret i1 %cmp.2
+}
+
+define i1 @zext_cmp_i8_to_i32_greater_than_255(i8 %a, i32 %b) {
+; CHECK-LABEL: @zext_cmp_i8_to_i32_greater_than_255(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A_EXT:%.*]] = zext i8 [[A:%.*]] to i32
+; CHECK-NEXT:    [[CMP_1:%.*]] = icmp ugt i32 [[B:%.*]], 260
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_1]])
+; CHECK-NEXT:    [[CMP_2:%.*]] = icmp ult i32 [[A_EXT]], [[B]]
+; CHECK-NEXT:    ret i1 [[CMP_2]]
+;
+entry:
+  %a.ext = zext i8 %a to i32
+  %cmp.1 = icmp ugt i32 %b, 260
+  call void @llvm.assume(i1 %cmp.1)
+  %cmp.2 = icmp ult i32 %a.ext, %b
+  ret i1 %cmp.2
+}
+
+define i1 @zext_cmp_i8_to_i32_add(i8 %a, i8 %b, i32 %c) {
+; CHECK-LABEL: @zext_cmp_i8_to_i32_add(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i8 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT:    [[ADD_EXT:%.*]] = zext i8 [[ADD]] to i32
+; CHECK-NEXT:    [[CMP_1:%.*]] = icmp ugt i32 [[C:%.*]], 255
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP_1]])
+; CHECK-NEXT:    [[CMP_2:%.*]] = icmp ult i32 [[ADD_EXT]], [[C]]
+; CHECK-NEXT:    ret i1 [[CMP_2]]
+;
+entry:
+  %add = add nuw nsw i8 %a, %b
+  %add.ext = zext i8 %add to i32
+  %cmp.1 = icmp ugt i32 %c, 255
+  call void @llvm.assume(i1 %cmp.1)
+  %cmp.2 = icmp ult i32 %add.ext, %c
+  ret i1 %cmp.2
+}
+
+define i1 @zext_multiple_uses(i8 %a, i32 %b, i32 %b2) {
+; CHECK-LABEL: @zext_multiple_uses(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A_EXT:%.*]] = zext i8 [[A:%.*]] to i32
+; CHECK-NEXT:    [[A2_EXT:%.*]] = zext i8 [[A]] to i32
+; CHECK-NEXT:    [[CMP_1:%.*]] = icmp ugt i32 [[B:%.*]], 255
+; CHECK-NEXT:    [[CMP_2:%.*]] = icmp ugt i32 [[B2:%.*]], 255
+; CHECK-NEXT:    [[PRE_CMP:%.*]] = and i1 [[CMP_1]], [[CMP_2]]
+; CHECK-NEXT:    br i1 [[PRE_CMP]], label [[COND:%.*]], label [[EXIT:%.*]]
+; CHECK:       cond:
+; CHECK-NEXT:    [[CMP_3:%.*]] = icmp ult i32 [[A_EXT]], [[B]]
+; CHECK-NEXT:    [[CMP_4:%.*]] = icmp ult i32 [[A2_EXT]], [[B2]]
+; CHECK-NEXT:    [[AND:%.*]] = and i1 [[CMP_3]], [[CMP_4]]
+; CHECK-NEXT:    ret i1 [[AND]]
+; CHECK:       exit:
+; CHECK-NEXT:    ret i1 false
+;
+entry:
+  %a.ext = zext i8 %a to i32
+  %a2.ext = zext i8 %a to i32
+  %cmp.1 = icmp ugt i32 %b, 255
+  %cmp.2 = icmp ugt i32 %b2, 255
+  %pre.cmp = and i1 %cmp.1, %cmp.2
+  br i1 %pre.cmp, label %cond, label %exit
+
+cond:
+  %cmp.3 = icmp ult i32 %a.ext, %b
+  %cmp.4 = icmp ult i32 %a2.ext, %b2
+  %and = and i1 %cmp.3, %cmp.4
+  ret i1 %and
+
+exit:
+  ret i1 false
+}
+
+define i1 @zext_operand_used_in_sext(i8 %a, i32 %b, i32 %b2) {
+; CHECK-LABEL: @zext_operand_used_in_sext(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A_EXT:%.*]] = zext i8 [[A:%.*]] to i32
+; CHECK-NEXT:    [[A2_EXT:%.*]] = sext i8 [[A]] to i32
+; CHECK-NEXT:    [[CMP_1:%.*]] = icmp ugt i32 [[B:%.*]], 255
+; CHECK-NEXT:    [[CMP_2:%.*]] = icmp ugt i32 [[B2:%.*]], 255
+; CHECK-NEXT:    [[PRE_CMP:%.*]] = and i1 [[CMP_1]], [[CMP_2]]
+; CHECK-NEXT:    call void @llvm.assume(i1 [[PRE_CMP]])
+; CHECK-NEXT:    [[CMP_3:%.*]] = icmp ult i32 [[A_EXT]], [[B]]
+; CHECK-NEXT:    [[CMP_4:%.*]] = icmp ult i32 [[A2_EXT]], [[B2]]
+; CHECK-NEXT:    [[AND:%.*]] = and i1 [[CMP_3]], [[CMP_4]]
+; CHECK-NEXT:    ret i1 [[AND]]
+;
+entry:
+  %a.ext = zext i8 %a to i32
+  %a2.ext = sext i8 %a to i32
+  %cmp.1 = icmp ugt i32 %b, 255
+  %cmp.2 = icmp ugt i32 %b2, 255
+  %pre.cmp = and i1 %cmp.1, %cmp.2
+  call void @llvm.assume(i1 %pre.cmp)
+  %cmp.3 = icmp ult i32 %a.ext, %b
+  %cmp.4 = icmp ult i32 %a2.ext, %b2
+  %and = and i1 %cmp.3, %cmp.4
+  ret i1 %and
+}