diff --git a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
new file
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
@@ -0,0 +1,2058 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -amdgpu-enable-delay-alu=0 < %s | FileCheck %s
+
+; The tests check the following DAGCombiner optimization:
+; CMP(A,C)||CMP(B,C) => CMP(MIN/MAX(A,B), C)
+; CMP(A,C)&&CMP(B,C) => CMP(MIN/MAX(A,B), C)
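+;
+; As an illustrative sketch (not one of the RUN tests below), the signed
+; less-than/OR form of the fold rewrites
+;   %cmp1 = icmp slt i32 %a, %c
+;   %cmp2 = icmp slt i32 %b, %c
+;   %or   = or i1 %cmp1, %cmp2
+; into the equivalent of
+;   %min = call i32 @llvm.smin.i32(i32 %a, i32 %b)
+;   %or  = icmp slt i32 %min, %c
+; leaving a single compare of the min/max against the shared bound.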
+
+define i1 @test1(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test1:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 0x3e8, v0
+; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, 0x3e8, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp slt i32 %arg1, 1000
+  %cmp2 = icmp slt i32 %arg2, 1000
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test2(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test2:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 0x3e8, v0
+; CHECK-NEXT:    v_cmp_gt_u32_e64 s0, 0x3e8, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ult i32 %arg1, 1000
+  %cmp2 = icmp ult i32 %arg2, 1000
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test3(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test3:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 0x3e9, v0
+; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, 0x3e9, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sle i32 %arg1, 1000
+  %cmp2 = icmp sle i32 %arg2, 1000
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test4(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test4:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 0x3e9, v0
+; CHECK-NEXT:    v_cmp_gt_u32_e64 s0, 0x3e9, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ule i32 %arg1, 1000
+  %cmp2 = icmp ule i32 %arg2, 1000
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test5(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test5:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 0x3e8, v0
+; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, 0x3e8, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sgt i32 %arg1, 1000
+  %cmp2 = icmp sgt i32 %arg2, 1000
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test6(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test6:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, 0x3e8, v0
+; CHECK-NEXT:    v_cmp_lt_u32_e64 s0, 0x3e8, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ugt i32 %arg1, 1000
+  %cmp2 = icmp ugt i32 %arg2, 1000
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test7(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test7:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 0x3e7, v0
+; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, 0x3e7, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sge i32 %arg1, 1000
+  %cmp2 = icmp sge i32 %arg2, 1000
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test8(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test8:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, 0x3e7, v0
+; CHECK-NEXT:    v_cmp_lt_u32_e64 s0, 0x3e7, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp uge i32 %arg1, 1000
+  %cmp2 = icmp uge i32 %arg2, 1000
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test9(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test9:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp slt i32 %arg1, %arg3
+  %cmp2 = icmp slt i32 %arg2, %arg3
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test10(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test10:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_lt_u32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ult i32 %arg1, %arg3
+  %cmp2 = icmp ult i32 %arg2, %arg3
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test11(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test11:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_le_i32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_le_i32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sle i32 %arg1, %arg3
+  %cmp2 = icmp sle i32 %arg2, %arg3
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test12(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test12:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_le_u32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_le_u32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ule i32 %arg1, %arg3
+  %cmp2 = icmp ule i32 %arg2, %arg3
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test13(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test13:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sgt i32 %arg1, %arg3
+  %cmp2 = icmp sgt i32 %arg2, %arg3
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test14(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test14:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_gt_u32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ugt i32 %arg1, %arg3
+  %cmp2 = icmp ugt i32 %arg2, %arg3
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test15(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test15:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_ge_i32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_ge_i32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sge i32 %arg1, %arg3
+  %cmp2 = icmp sge i32 %arg2, %arg3
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test16(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_ge_u32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp uge i32 %arg1, %arg3
+  %cmp2 = icmp uge i32 %arg2, %arg3
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test17(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test17:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 0x3e8, v0
+; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, 0x3e8, v1
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp slt i32 %arg1, 1000
+  %cmp2 = icmp slt i32 %arg2, 1000
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test18(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test18:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 0x3e8, v0
+; CHECK-NEXT:    v_cmp_gt_u32_e64 s0, 0x3e8, v1
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ult i32 %arg1, 1000
+  %cmp2 = icmp ult i32 %arg2, 1000
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test19(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test19:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 0x3e9, v0
+; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, 0x3e9, v1
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sle i32 %arg1, 1000
+  %cmp2 = icmp sle i32 %arg2, 1000
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test20(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test20:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 0x3e9, v0
+; CHECK-NEXT:    v_cmp_gt_u32_e64 s0, 0x3e9, v1
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ule i32 %arg1, 1000
+  %cmp2 = icmp ule i32 %arg2, 1000
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test21(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test21:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 0x3e8, v0
+; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, 0x3e8, v1
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sgt i32 %arg1, 1000
+  %cmp2 = icmp sgt i32 %arg2, 1000
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test22(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test22:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, 0x3e8, v0
+; CHECK-NEXT:    v_cmp_lt_u32_e64 s0, 0x3e8, v1
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ugt i32 %arg1, 1000
+  %cmp2 = icmp ugt i32 %arg2, 1000
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test23(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test23:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, 0x3e7, v0
+; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, 0x3e7, v1
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sge i32 %arg1, 1000
+  %cmp2 = icmp sge i32 %arg2, 1000
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test24(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test24:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, 0x3e7, v0
+; CHECK-NEXT:    v_cmp_lt_u32_e64 s0, 0x3e7, v1
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp uge i32 %arg1, 1000
+  %cmp2 = icmp uge i32 %arg2, 1000
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test25(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test25:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp slt i32 %arg1, %arg3
+  %cmp2 = icmp slt i32 %arg2, %arg3
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test26(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test26:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_lt_u32_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ult i32 %arg1, %arg3
+  %cmp2 = icmp ult i32 %arg2, %arg3
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test27(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test27:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_le_i32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_le_i32_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sle i32 %arg1, %arg3
+  %cmp2 = icmp sle i32 %arg2, %arg3
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test28(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test28:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_le_u32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_le_u32_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ule i32 %arg1, %arg3
+  %cmp2 = icmp ule i32 %arg2, %arg3
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test29(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test29:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sgt i32 %arg1, %arg3
+  %cmp2 = icmp sgt i32 %arg2, %arg3
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test30(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test30:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_gt_u32_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ugt i32 %arg1, %arg3
+  %cmp2 = icmp ugt i32 %arg2, %arg3
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test31(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test31:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_ge_i32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_ge_i32_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sge i32 %arg1, %arg3
+  %cmp2 = icmp sge i32 %arg2, %arg3
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test32(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_ge_u32_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp uge i32 %arg1, %arg3
+  %cmp2 = icmp uge i32 %arg2, %arg3
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test33(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test33:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, v0, v1
+; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, 0x3e8, v0
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp slt i32 %arg1, %arg2
+  %cmp2 = icmp slt i32 %arg1, 1000
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define amdgpu_gfx void @test34(i32 inreg %arg1, i32 inreg %arg2) {
+; CHECK-LABEL: test34:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_cmpk_lt_i32 s4, 0x3e9
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
+; CHECK-NEXT:    s_cmpk_lt_i32 s5, 0x3e9
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    s_cselect_b32 s1, -1, 0
+; CHECK-NEXT:    s_or_b32 s0, s0, s1
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
+; CHECK-NEXT:    global_store_b8 v[0:1], v2, off dlc
+; CHECK-NEXT:    s_waitcnt_vscnt null, 0x0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sle i32 %arg1, 1000
+  %cmp2 = icmp sle i32 %arg2, 1000
+  %or = or i1 %cmp1, %cmp2
+  store volatile i1 %or, ptr addrspace(1) null
+  ret void
+}
+
+define amdgpu_gfx void @test35(i32 inreg %arg1, i32 inreg %arg2) {
+; CHECK-LABEL: test35:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_cmpk_gt_i32 s4, 0x3e8
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
+; CHECK-NEXT:    s_cmpk_gt_i32 s5, 0x3e8
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    s_cselect_b32 s1, -1, 0
+; CHECK-NEXT:    s_or_b32 s0, s0, s1
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
+; CHECK-NEXT:    global_store_b8 v[0:1], v2, off dlc
+; CHECK-NEXT:    s_waitcnt_vscnt null, 0x0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sgt i32 %arg1, 1000
+  %cmp2 = icmp sgt i32 %arg2, 1000
+  %or = or i1 %cmp1, %cmp2
+  store volatile i1 %or, ptr addrspace(1) null
+  ret void
+}
+
+define amdgpu_gfx void @test36(i32 inreg %arg1, i32 inreg %arg2, i32 inreg %arg3) {
+; CHECK-LABEL: test36:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_cmp_lt_u32 s4, s6
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
+; CHECK-NEXT:    s_cmp_lt_u32 s5, s6
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    s_cselect_b32 s1, -1, 0
+; CHECK-NEXT:    s_or_b32 s0, s0, s1
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
+; CHECK-NEXT:    global_store_b8 v[0:1], v2, off dlc
+; CHECK-NEXT:    s_waitcnt_vscnt null, 0x0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ult i32 %arg1, %arg3
+  %cmp2 = icmp ult i32 %arg2, %arg3
+  %or = or i1 %cmp1, %cmp2
+  store volatile i1 %or, ptr addrspace(1) null
+  ret void
+}
+
+define amdgpu_gfx void @test37(i32 inreg %arg1, i32 inreg %arg2, i32 inreg %arg3) {
+; CHECK-LABEL: test37:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_cmp_ge_i32 s4, s6
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
+; CHECK-NEXT:    s_cmp_ge_i32 s5, s6
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    s_cselect_b32 s1, -1, 0
+; CHECK-NEXT:    s_or_b32 s0, s0, s1
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
+; CHECK-NEXT:    global_store_b8 v[0:1], v2, off dlc
+; CHECK-NEXT:    s_waitcnt_vscnt null, 0x0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sge i32 %arg1, %arg3
+  %cmp2 = icmp sge i32 %arg2, %arg3
+  %or = or i1 %cmp1, %cmp2
+  store volatile i1 %or, ptr addrspace(1) null
+  ret void
+}
+
+define amdgpu_gfx void @test38(i32 inreg %arg1, i32 inreg %arg2) {
+; CHECK-LABEL: test38:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_cmpk_lt_u32 s4, 0x3e9
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
+; CHECK-NEXT:    s_cmpk_lt_u32 s5, 0x3e9
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    s_cselect_b32 s1, -1, 0
+; CHECK-NEXT:    s_and_b32 s0, s0, s1
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
+; CHECK-NEXT:    global_store_b8 v[0:1], v2, off dlc
+; CHECK-NEXT:    s_waitcnt_vscnt null, 0x0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ule i32 %arg1, 1000
+  %cmp2 = icmp ule i32 %arg2, 1000
+  %and = and i1 %cmp1, %cmp2
+  store volatile i1 %and, ptr addrspace(1) null
+  ret void
+}
+
+define amdgpu_gfx void @test39(i32 inreg %arg1, i32 inreg %arg2) {
+; CHECK-LABEL: test39:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_cmpk_gt_i32 s4, 0x3e7
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
+; CHECK-NEXT:    s_cmpk_gt_i32 s5, 0x3e7
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    s_cselect_b32 s1, -1, 0
+; CHECK-NEXT:    s_and_b32 s0, s0, s1
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
+; CHECK-NEXT:    global_store_b8 v[0:1], v2, off dlc
+; CHECK-NEXT:    s_waitcnt_vscnt null, 0x0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sge i32 %arg1, 1000
+  %cmp2 = icmp sge i32 %arg2, 1000
+  %and = and i1 %cmp1, %cmp2
+  store volatile i1 %and, ptr addrspace(1) null
+  ret void
+}
+
+define amdgpu_gfx void @test40(i32 inreg %arg1, i32 inreg %arg2, i32 inreg %arg3) {
+; CHECK-LABEL: test40:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_cmp_le_i32 s4, s6
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
+; CHECK-NEXT:    s_cmp_le_i32 s5, s6
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    s_cselect_b32 s1, -1, 0
+; CHECK-NEXT:    s_and_b32 s0, s0, s1
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
+; CHECK-NEXT:    global_store_b8 v[0:1], v2, off dlc
+; CHECK-NEXT:    s_waitcnt_vscnt null, 0x0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sle i32 %arg1, %arg3
+  %cmp2 = icmp sle i32 %arg2, %arg3
+  %and = and i1 %cmp1, %cmp2
+  store volatile i1 %and, ptr addrspace(1) null
+  ret void
+}
+
+define amdgpu_gfx void @test41(i32 inreg %arg1, i32 inreg %arg2, i32 inreg %arg3) {
+; CHECK-LABEL: test41:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_cmp_ge_u32 s4, s6
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
+; CHECK-NEXT:    s_cselect_b32 s0, -1, 0
+; CHECK-NEXT:    s_cmp_ge_u32 s5, s6
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    s_cselect_b32 s1, -1, 0
+; CHECK-NEXT:    s_and_b32 s0, s0, s1
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s0
+; CHECK-NEXT:    global_store_b8 v[0:1], v2, off dlc
+; CHECK-NEXT:    s_waitcnt_vscnt null, 0x0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp uge i32 %arg1, %arg3
+  %cmp2 = icmp uge i32 %arg2, %arg3
+  %and = and i1 %cmp1, %cmp2
+  store volatile i1 %and, ptr addrspace(1) null
+  ret void
+}
+
+define i1 @test42(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test42:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    v_cmp_lt_u32_e64 s0, v2, v1
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ult i32 %arg3, %arg1
+  %cmp2 = icmp ult i32 %arg3, %arg2
+  %or = and i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test43(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test43:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    v_cmp_lt_u32_e64 s0, v2, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ult i32 %arg3, %arg1
+  %cmp2 = icmp ult i32 %arg3, %arg2
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test44(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test44:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    v_cmp_gt_u32_e64 s0, v2, v1
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ugt i32 %arg3, %arg1
+  %cmp2 = icmp ugt i32 %arg3, %arg2
+  %or = and i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test45(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test45:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    v_cmp_gt_u32_e64 s0, v2, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ugt i32 %arg3, %arg1
+  %cmp2 = icmp ugt i32 %arg3, %arg2
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test46(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test46:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp slt i32 %arg3, %arg1
+  %cmp2 = icmp sgt i32 %arg2, %arg3
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test47(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test47:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, v2, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sgt i32 %arg1, %arg3
+  %cmp2 = icmp slt i32 %arg3, %arg2
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test48(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test48:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, v2, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp slt i32 %arg1, %arg3
+  %cmp2 = icmp sgt i32 %arg3, %arg2
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test49(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test49:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sgt i32 %arg3, %arg1
+  %cmp2 = icmp slt i32 %arg2, %arg3
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test50(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test50:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp slt i32 %arg3, %arg1
+  %cmp2 = icmp sgt i32 %arg2, %arg3
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test51(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test51:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, v2, v1
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sgt i32 %arg1, %arg3
+  %cmp2 = icmp slt i32 %arg3, %arg2
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test52(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test52:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_i32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, v2, v1
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp slt i32 %arg1, %arg3
+  %cmp2 = icmp sgt i32 %arg3, %arg2
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test53(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test53:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_i32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    v_cmp_lt_i32_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp sgt i32 %arg3, %arg1
+  %cmp2 = icmp slt i32 %arg2, %arg3
+  %and = and i1 %cmp1, %cmp2
+  ret i1 %and
+}
+
+define i1 @test54(float %arg1, float %arg2, float %arg3) #0 {
+; CHECK-LABEL: test54:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_lt_f32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp olt float %arg1, %arg3
+  %cmp2 = fcmp olt float %arg2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test55(double %arg1, double %arg2, double %arg3) #0 {
+; CHECK-LABEL: test55:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_le_f64_e64 s0, v[2:3], v[4:5]
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp ole double %arg1, %arg3
+  %cmp2 = fcmp ole double %arg2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test56(double %arg1, double %arg2, double %arg3) #0 {
+; CHECK-LABEL: test56:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp ogt double %arg1, %arg3
+  %cmp2 = fcmp ogt double %arg2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test57(float %arg1, float %arg2, float %arg3) #0 {
+; CHECK-LABEL: test57:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_ge_f32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp oge float %arg1, %arg3
+  %cmp2 = fcmp oge float %arg2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test58(double %arg1, double %arg2, double %arg3) #0 {
+; CHECK-LABEL: test58:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_nle_f64_e64 s0, v[2:3], v[4:5]
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp ugt double %arg1, %arg3
+  %cmp2 = fcmp ugt double %arg2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test59(float %arg1, float %arg2, float %arg3) #0 {
+; CHECK-LABEL: test59:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_nlt_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_nlt_f32_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp uge float %arg1, %arg3
+  %cmp2 = fcmp uge float %arg2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test60(float %arg1, float %arg2, float %arg3) #0 {
+; CHECK-LABEL: test60:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_ngt_f32_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp ule float %arg1, %arg3
+  %cmp2 = fcmp ule float %arg2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test61(double %arg1, double %arg2, double %arg3) #0 {
+; CHECK-LABEL: test61:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp ult double %arg1, %arg3
+  %cmp2 = fcmp ult double %arg2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test62(float %arg1, float %arg2, float %arg3) {
+; CHECK-LABEL: test62:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_dual_add_f32 v0, 1.0, v0 :: v_dual_add_f32 v1, 2.0, v1
+; CHECK-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_lt_f32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %add1 = fadd nnan float %arg1, 1.0
+  %add2 = fadd nnan float %arg2, 2.0
+  %cmp1 = fcmp nnan olt float %add1, %arg3
+  %cmp2 = fcmp nnan olt float %add2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test63(double %arg1, double %arg2, double %arg3) #0 {
+; CHECK-LABEL: test63:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_add_f64 v[0:1], v[0:1], 1.0
+; CHECK-NEXT:    v_add_f64 v[2:3], v[2:3], 2.0
+; CHECK-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_le_f64_e64 s0, v[2:3], v[4:5]
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %add1 = fadd nnan double %arg1, 1.0
+  %add2 = fadd nnan double %arg2, 2.0
+  %cmp1 = fcmp nnan ole double %add1, %arg3
+  %cmp2 = fcmp nnan ole double %add2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test64(double %arg1, double %arg2, double %arg3) #0 {
+; CHECK-LABEL: test64:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_add_f64 v[0:1], v[0:1], 1.0
+; CHECK-NEXT:    v_add_f64 v[2:3], v[2:3], 2.0
+; CHECK-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %add1 = fadd nnan double %arg1, 1.0
+  %add2 = fadd nnan double %arg2, 2.0
+  %cmp1 = fcmp nnan ogt double %add1, %arg3
+  %cmp2 = fcmp nnan ogt double %add2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test65(float %arg1, float %arg2, float %arg3) {
+; CHECK-LABEL: test65:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_dual_add_f32 v0, 1.0, v0 :: v_dual_add_f32 v1, 2.0, v1
+; CHECK-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_ge_f32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %add1 = fadd nnan float %arg1, 1.0
+  %add2 = fadd nnan float %arg2, 2.0
+  %cmp1 = fcmp nnan oge float %add1, %arg3
+  %cmp2 = fcmp nnan oge float %add2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test66(double %arg1, double %arg2, double %arg3) {
+; CHECK-LABEL: test66:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_add_f64 v[0:1], v[0:1], 1.0
+; CHECK-NEXT:    v_add_f64 v[2:3], v[2:3], 2.0
+; CHECK-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %add1 = fadd nnan double %arg1, 1.0
+  %add2 = fadd nnan double %arg2, 2.0
+  %cmp1 = fcmp nnan ugt double %add1, %arg3
+  %cmp2 = fcmp nnan ugt double %add2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test67(float %arg1, float %arg2, float %arg3) #0 {
+; CHECK-LABEL: test67:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_dual_add_f32 v0, 1.0, v0 :: v_dual_add_f32 v1, 2.0, v1
+; CHECK-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_ge_f32_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %add1 = fadd nnan float %arg1, 1.0
+  %add2 = fadd nnan float %arg2, 2.0
+  %cmp1 = fcmp nnan uge float %add1, %arg3
+  %cmp2 = fcmp nnan uge float %add2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test68(float %arg1, float %arg2, float %arg3) #0 {
+; CHECK-LABEL: test68:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_dual_add_f32 v0, 1.0, v0 :: v_dual_add_f32 v1, 2.0, v1
+; CHECK-NEXT:    v_cmp_le_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_le_f32_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %add1 = fadd nnan float %arg1, 1.0
+  %add2 = fadd nnan float %arg2, 2.0
+  %cmp1 = fcmp nnan ule float %add1, %arg3
+  %cmp2 = fcmp nnan ule float %add2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test69(double %arg1, double %arg2, double %arg3) {
+; CHECK-LABEL: test69:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_add_f64 v[0:1], v[0:1], 1.0
+; CHECK-NEXT:    v_add_f64 v[2:3], v[2:3], 2.0
+; CHECK-NEXT:    v_cmp_lt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_lt_f64_e64 s0, v[2:3], v[4:5]
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %add1 = fadd nnan double %arg1, 1.0
+  %add2 = fadd nnan double %arg2, 2.0
+  %cmp1 = fcmp nnan ult double %add1, %arg3
+  %cmp2 = fcmp nnan ult double %add2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test70(float %arg1, float %arg2, float %arg3) {
+; CHECK-LABEL: test70:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; CHECK-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_lt_f32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call float @llvm.canonicalize.f32(float %arg1)
+  %var2 = call float @llvm.canonicalize.f32(float %arg2)
+  %cmp1 = fcmp olt float %var1, %arg3
+  %cmp2 = fcmp olt float %var2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test71(double %arg1, double %arg2, double %arg3) {
+; CHECK-LABEL: test71:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; CHECK-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; CHECK-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_le_f64_e64 s0, v[2:3], v[4:5]
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call double @llvm.canonicalize.f64(double %arg1)
+  %var2 = call double @llvm.canonicalize.f64(double %arg2)
+  %cmp1 = fcmp ole double %var1, %arg3
+  %cmp2 = fcmp ole double %var2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test72(double %arg1, double %arg2, double %arg3) {
+; CHECK-LABEL: test72:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; CHECK-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; CHECK-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call double @llvm.canonicalize.f64(double %arg1)
+  %var2 = call double @llvm.canonicalize.f64(double %arg2)
+  %cmp1 = fcmp ogt double %var1, %arg3
+  %cmp2 = fcmp ogt double %var2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test73(float %arg1, float %arg2, float %arg3) {
+; CHECK-LABEL: test73:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; CHECK-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_ge_f32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call float @llvm.canonicalize.f32(float %arg1)
+  %var2 = call float @llvm.canonicalize.f32(float %arg2)
+  %cmp1 = fcmp oge float %var1, %arg3
+  %cmp2 = fcmp oge float %var2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test74(double %arg1, double %arg2, double %arg3) {
+; CHECK-LABEL: test74:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; CHECK-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; CHECK-NEXT:    v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_nle_f64_e64 s0, v[2:3], v[4:5]
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call double @llvm.canonicalize.f64(double %arg1)
+  %var2 = call double @llvm.canonicalize.f64(double %arg2)
+  %cmp1 = fcmp ugt double %var1, %arg3
+  %cmp2 = fcmp ugt double %var2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test75(float %arg1, float %arg2, float %arg3) {
+; CHECK-LABEL: test75:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; CHECK-NEXT:    v_cmp_nlt_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_nlt_f32_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call float @llvm.canonicalize.f32(float %arg1)
+  %var2 = call float @llvm.canonicalize.f32(float %arg2)
+  %cmp1 = fcmp uge float %var1, %arg3
+  %cmp2 = fcmp uge float %var2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test76(float %arg1, float %arg2, float %arg3) {
+; CHECK-LABEL: test76:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; CHECK-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_ngt_f32_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call float @llvm.canonicalize.f32(float %arg1)
+  %var2 = call float @llvm.canonicalize.f32(float %arg2)
+  %cmp1 = fcmp ule float %var1, %arg3
+  %cmp2 = fcmp ule float %var2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test77(double %arg1, double %arg2, double %arg3) {
+; CHECK-LABEL: test77:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; CHECK-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; CHECK-NEXT:    v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call double @llvm.canonicalize.f64(double %arg1)
+  %var2 = call double @llvm.canonicalize.f64(double %arg2)
+  %cmp1 = fcmp ult double %var1, %arg3
+  %cmp2 = fcmp ult double %var2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test78(float %arg1, float %arg2, float %arg3) #0 {
+; CHECK-LABEL: test78:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_gt_f32_e64 s0, v2, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp olt float %arg1, %arg3
+  %cmp2 = fcmp ogt float %arg3, %arg2
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test79(float %arg1, float %arg2, float %arg3) #0 {
+; CHECK-LABEL: test79:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_nge_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_nle_f32_e64 s0, v2, v1
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp ult float %arg1, %arg3
+  %cmp2 = fcmp ugt float %arg3, %arg2
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test80(float %arg1, float %arg2, float %arg3) {
+; CHECK-LABEL: test80:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_dual_add_f32 v0, 1.0, v0 :: v_dual_add_f32 v1, 2.0, v1
+; CHECK-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_le_f32_e64 s0, v2, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %add1 = fadd nnan float %arg1, 1.0
+  %add2 = fadd nnan float %arg2, 2.0
+  %cmp1 = fcmp nnan oge float %add1, %arg3
+  %cmp2 = fcmp nnan ole float %arg3, %add2
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test81(double %arg1, double %arg2, double %arg3) {
+; CHECK-LABEL: test81:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_add_f64 v[0:1], v[0:1], 1.0
+; CHECK-NEXT:    v_add_f64 v[2:3], v[2:3], 2.0
+; CHECK-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_lt_f64_e64 s0, v[4:5], v[2:3]
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %add1 = fadd nnan double %arg1, 1.0
+  %add2 = fadd nnan double %arg2, 2.0
+  %cmp1 = fcmp nnan ugt double %add1, %arg3
+  %cmp2 = fcmp nnan ult double %arg3, %add2
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test82(double %arg1, double %arg2, double %arg3) {
+; CHECK-LABEL: test82:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; CHECK-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; CHECK-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_ge_f64_e64 s0, v[4:5], v[2:3]
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call double @llvm.canonicalize.f64(double %arg1)
+  %var2 = call double @llvm.canonicalize.f64(double %arg2)
+  %cmp1 = fcmp ole double %var1, %arg3
+  %cmp2 = fcmp oge double %arg3, %var2
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test83(float %arg1, float %arg2, float %arg3) {
+; CHECK-LABEL: test83:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; CHECK-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_nlt_f32_e64 s0, v2, v1
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call float @llvm.canonicalize.f32(float %arg1)
+  %var2 = call float @llvm.canonicalize.f32(float %arg2)
+  %cmp1 = fcmp ule float %var1, %arg3
+  %cmp2 = fcmp uge float %arg3, %var2
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test84(half %arg1, half %arg2, half %arg3) {
+; CHECK-LABEL: test84:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_f16_e32 v0, v0, v0
+; CHECK-NEXT:    v_max_f16_e32 v1, v1, v1
+; CHECK-NEXT:    v_cmp_lt_f16_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_lt_f16_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call half @llvm.canonicalize.f16(half %arg1)
+  %var2 = call half @llvm.canonicalize.f16(half %arg2)
+  %cmp1 = fcmp olt half %var1, %arg3
+  %cmp2 = fcmp olt half %var2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define <2 x i1> @test85(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
+; CHECK-LABEL: test85:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_pk_max_f16 v0, v0, v0
+; CHECK-NEXT:    v_pk_max_f16 v1, v1, v1
+; CHECK-NEXT:    v_lshrrev_b32_e32 v3, 16, v2
+; CHECK-NEXT:    v_lshrrev_b32_e32 v4, 16, v0
+; CHECK-NEXT:    v_lshrrev_b32_e32 v5, 16, v1
+; CHECK-NEXT:    v_cmp_le_f16_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_le_f16_e64 s0, v1, v2
+; CHECK-NEXT:    v_cmp_le_f16_e64 s1, v4, v3
+; CHECK-NEXT:    v_cmp_le_f16_e64 s2, v5, v3
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_or_b32 s0, s1, s2
+; CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
+  %var2 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
+  %cmp1 = fcmp ole <2 x half> %var1, %arg3
+  %cmp2 = fcmp ole <2 x half> %var2, %arg3
+  %or1 = or <2 x i1> %cmp1, %cmp2
+  ret <2 x i1> %or1
+}
+
+define <2 x i1> @test86(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
+; CHECK-LABEL: test86:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_pk_max_f16 v0, v0, v0
+; CHECK-NEXT:    v_pk_max_f16 v1, v1, v1
+; CHECK-NEXT:    v_lshrrev_b32_e32 v3, 16, v2
+; CHECK-NEXT:    v_lshrrev_b32_e32 v4, 16, v0
+; CHECK-NEXT:    v_lshrrev_b32_e32 v5, 16, v1
+; CHECK-NEXT:    v_cmp_gt_f16_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_gt_f16_e64 s0, v1, v2
+; CHECK-NEXT:    v_cmp_gt_f16_e64 s1, v4, v3
+; CHECK-NEXT:    v_cmp_gt_f16_e64 s2, v5, v3
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_or_b32 s0, s1, s2
+; CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
+  %var2 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
+  %cmp1 = fcmp ogt <2 x half> %var1, %arg3
+  %cmp2 = fcmp ogt <2 x half> %var2, %arg3
+  %or1 = or <2 x i1> %cmp1, %cmp2
+  ret <2 x i1> %or1
+}
+
+define i1 @test87(half %arg1, half %arg2, half %arg3) {
+; CHECK-LABEL: test87:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_f16_e32 v0, v0, v0
+; CHECK-NEXT:    v_max_f16_e32 v1, v1, v1
+; CHECK-NEXT:    v_cmp_ge_f16_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_ge_f16_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call half @llvm.canonicalize.f16(half %arg1)
+  %var2 = call half @llvm.canonicalize.f16(half %arg2)
+  %cmp1 = fcmp oge half %var1, %arg3
+  %cmp2 = fcmp oge half %var2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define <2 x i1> @test88(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
+; CHECK-LABEL: test88:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_pk_max_f16 v0, v0, v0
+; CHECK-NEXT:    v_pk_max_f16 v1, v1, v1
+; CHECK-NEXT:    v_lshrrev_b32_e32 v3, 16, v2
+; CHECK-NEXT:    v_lshrrev_b32_e32 v4, 16, v0
+; CHECK-NEXT:    v_lshrrev_b32_e32 v5, 16, v1
+; CHECK-NEXT:    v_cmp_nle_f16_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_nle_f16_e64 s0, v1, v2
+; CHECK-NEXT:    v_cmp_nle_f16_e64 s1, v4, v3
+; CHECK-NEXT:    v_cmp_nle_f16_e64 s2, v5, v3
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_and_b32 s0, s1, s2
+; CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
+  %var2 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
+  %cmp1 = fcmp ugt <2 x half> %var1, %arg3
+  %cmp2 = fcmp ugt <2 x half> %var2, %arg3
+  %and1 = and <2 x i1> %cmp1, %cmp2
+  ret <2 x i1> %and1
+}
+
+define i1 @test89(half %arg1, half %arg2, half %arg3) {
+; CHECK-LABEL: test89:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_f16_e32 v0, v0, v0
+; CHECK-NEXT:    v_max_f16_e32 v1, v1, v1
+; CHECK-NEXT:    v_cmp_nlt_f16_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_nlt_f16_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call half @llvm.canonicalize.f16(half %arg1)
+  %var2 = call half @llvm.canonicalize.f16(half %arg2)
+  %cmp1 = fcmp uge half %var1, %arg3
+  %cmp2 = fcmp uge half %var2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test90(half %arg1, half %arg2, half %arg3) {
+; CHECK-LABEL: test90:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_f16_e32 v0, v0, v0
+; CHECK-NEXT:    v_max_f16_e32 v1, v1, v1
+; CHECK-NEXT:    v_cmp_ngt_f16_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_ngt_f16_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call half @llvm.canonicalize.f16(half %arg1)
+  %var2 = call half @llvm.canonicalize.f16(half %arg2)
+  %cmp1 = fcmp ule half %var1, %arg3
+  %cmp2 = fcmp ule half %var2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define <2 x i1> @test91(<2 x half> %arg1, <2 x half> %arg2, <2 x half> %arg3) {
+; CHECK-LABEL: test91:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_pk_max_f16 v0, v0, v0
+; CHECK-NEXT:    v_pk_max_f16 v1, v1, v1
+; CHECK-NEXT:    v_lshrrev_b32_e32 v3, 16, v2
+; CHECK-NEXT:    v_lshrrev_b32_e32 v4, 16, v0
+; CHECK-NEXT:    v_lshrrev_b32_e32 v5, 16, v1
+; CHECK-NEXT:    v_cmp_nge_f16_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_nge_f16_e64 s0, v1, v2
+; CHECK-NEXT:    v_cmp_nge_f16_e64 s1, v4, v3
+; CHECK-NEXT:    v_cmp_nge_f16_e64 s2, v5, v3
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_and_b32 s0, s1, s2
+; CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg1)
+  %var2 = call <2 x half> @llvm.canonicalize.v2f16(<2 x half> %arg2)
+  %cmp1 = fcmp ult <2 x half> %var1, %arg3
+  %cmp2 = fcmp ult <2 x half> %var2, %arg3
+  %and1 = and <2 x i1> %cmp1, %cmp2
+  ret <2 x i1> %and1
+}
+
+; The optimization does not apply to the following tests, e.g. because the
+; compared types or constants differ, the predicates do not match, a compare
+; has extra uses, or the FP fold would not preserve NaN behavior.
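+; For the FP cases, consider test106: (fcmp olt %a, %c) && (fcmp olt %b, %c)
+; cannot be folded to fcmp olt (maxnum %a, %b), %c. If %a is NaN, the
+; original AND is false, but maxnum(NaN, %b) = %b, so the folded form would
+; instead compute (%b < %c).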
+
+define i1 @test92(i32 %arg1, i64 %arg2) {
+; CHECK-LABEL: test92:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    s_mov_b64 s[0:1], 0x3e8
+; CHECK-NEXT:    v_cmp_gt_i64_e32 vcc_lo, s[0:1], v[1:2]
+; CHECK-NEXT:    v_cmp_gt_i32_e64 s0, 0x3e8, v0
+; CHECK-NEXT:    s_or_b32 s0, s0, vcc_lo
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp slt i32 %arg1, 1000
+  %cmp2 = icmp slt i64 %arg2, 1000
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test93(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test93:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0x3e8, v0
+; CHECK-NEXT:    v_cmp_eq_u32_e64 s0, 0x3e8, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp eq i32 %arg1, 1000
+  %cmp2 = icmp eq i32 %arg2, 1000
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test94(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test94:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0x3e8, v0
+; CHECK-NEXT:    v_cmp_ne_u32_e64 s0, 0x3e8, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ne i32 %arg1, 1000
+  %cmp2 = icmp ne i32 %arg2, 1000
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test95(i64 %arg1, i64 %arg2, i64 %arg3) {
+; CHECK-LABEL: test95:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_lt_u64_e64 s0, v[2:3], v[4:5]
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ult i64 %arg1, %arg3
+  %cmp2 = icmp ult i64 %arg2, %arg3
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test96(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test96:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_lt_u32_e64 s0, v2, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ult i32 %arg1, %arg3
+  %cmp2 = icmp ult i32 %arg3, %arg2
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test97(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test97:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_le_u32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ult i32 %arg1, %arg3
+  %cmp2 = icmp ule i32 %arg2, %arg3
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test98(i32 %arg1, i32 %arg2, i32 %arg3) {
+; CHECK-LABEL: test98:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_le_u32_e32 vcc_lo, v2, v0
+; CHECK-NEXT:    v_cmp_gt_u32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ule i32 %arg3, %arg1
+  %cmp2 = icmp ugt i32 %arg2, %arg3
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test99(i16 %arg1, i32 %arg2) {
+; CHECK-LABEL: test99:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_u16_e32 vcc_lo, 10, v0
+; CHECK-NEXT:    v_cmp_gt_u32_e64 s0, 10, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ult i16 %arg1, 10
+  %cmp2 = icmp ult i32 %arg2, 10
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test100(i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4) {
+; CHECK-LABEL: test100:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_lt_u32_e64 s0, v1, v2
+; CHECK-NEXT:    v_cmp_lt_u32_e64 s1, v0, v3
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    s_or_b32 s1, s1, vcc_lo
+; CHECK-NEXT:    s_or_b32 s0, s0, s1
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ult i32 %arg1, %arg3
+  %cmp2 = icmp ult i32 %arg2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  %cmp3 = icmp ult i32 %arg1, %arg4
+  %or2 = or i1 %cmp3, %cmp1
+  %or3 = or i1 %or1, %or2
+  ret i1 %or3
+}
+
+define i1 @test101(i32 %arg1, i32 %arg2) {
+; CHECK-LABEL: test101:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_gt_u32_e32 vcc_lo, 0x64, v0
+; CHECK-NEXT:    v_cmp_gt_u32_e64 s0, 0x3e8, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = icmp ult i32 %arg1, 100
+  %cmp2 = icmp ult i32 %arg2, 1000
+  %or = or i1 %cmp1, %cmp2
+  ret i1 %or
+}
+
+define i1 @test102(float %arg1, float %arg2, float %arg3) #0 {
+; CHECK-LABEL: test102:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_gt_f32_e64 s0, v2, v1
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp olt float %arg1, %arg3
+  %cmp2 = fcmp ogt float %arg3, %arg2
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test103(float %arg1, float %arg2, float %arg3) #0 {
+; CHECK-LABEL: test103:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_nge_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_nle_f32_e64 s0, v2, v1
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp ult float %arg1, %arg3
+  %cmp2 = fcmp ugt float %arg3, %arg2
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test104(double %arg1, double %arg2, double %arg3) {
+; CHECK-LABEL: test104:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; CHECK-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; CHECK-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_ge_f64_e64 s0, v[4:5], v[2:3]
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call double @llvm.canonicalize.f64(double %arg1)
+  %var2 = call double @llvm.canonicalize.f64(double %arg2)
+  %cmp1 = fcmp ole double %var1, %arg3
+  %cmp2 = fcmp oge double %arg3, %var2
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
v[2:3] +; CHECK-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5] +; CHECK-NEXT: v_cmp_ge_f64_e64 s0, v[4:5], v[2:3] +; CHECK-NEXT: s_and_b32 s0, vcc_lo, s0 +; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %var1 = call double @llvm.canonicalize.f64(double %arg1) + %var2 = call double @llvm.canonicalize.f64(double %arg2) + %cmp1 = fcmp ole double %var1, %arg3 + %cmp2 = fcmp oge double %arg3, %var2 + %and1 = and i1 %cmp1, %cmp2 + ret i1 %and1 +} + +define i1 @test105(float %arg1, float %arg2, float %arg3) { +; CHECK-LABEL: test105: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1 +; CHECK-NEXT: v_cmp_ngt_f32_e32 vcc_lo, v0, v2 +; CHECK-NEXT: v_cmp_nlt_f32_e64 s0, v2, v1 +; CHECK-NEXT: s_or_b32 s0, vcc_lo, s0 +; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %var1 = call float @llvm.canonicalize.f32(float %arg1) + %var2 = call float @llvm.canonicalize.f32(float %arg2) + %cmp1 = fcmp ule float %var1, %arg3 + %cmp2 = fcmp uge float %arg3, %var2 + %or1 = or i1 %cmp1, %cmp2 + ret i1 %or1 +} + +define i1 @test106(float %arg1, float %arg2, float %arg3) #0 { +; CHECK-LABEL: test106: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_cmp_lt_f32_e32 vcc_lo, v0, v2 +; CHECK-NEXT: v_cmp_lt_f32_e64 s0, v1, v2 +; CHECK-NEXT: s_and_b32 s0, vcc_lo, s0 +; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %cmp1 = fcmp olt float %arg1, %arg3 + %cmp2 = fcmp olt float %arg2, %arg3 + %and1 = and i1 %cmp1, %cmp2 + ret i1 %and1 +} + +define i1 @test107(double %arg1, double %arg2, double %arg3) #0 { +; CHECK-LABEL: test107: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5] +; CHECK-NEXT: v_cmp_le_f64_e64 s0, v[2:3], v[4:5] +; CHECK-NEXT: s_and_b32 s0, vcc_lo, s0 +; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %cmp1 = fcmp ole double %arg1, %arg3 + %cmp2 = fcmp ole double %arg2, %arg3 + %and1 = and i1 %cmp1, %cmp2 + ret i1 %and1 +} + +define i1 @test108(double %arg1, double %arg2, double %arg3) #0 { +; CHECK-LABEL: test108: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5] +; CHECK-NEXT: v_cmp_gt_f64_e64 s0, v[2:3], v[4:5] +; CHECK-NEXT: s_and_b32 s0, vcc_lo, s0 +; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %cmp1 = fcmp ogt double %arg1, %arg3 + %cmp2 = fcmp ogt double %arg2, %arg3 + %and1 = and i1 %cmp1, %cmp2 + ret i1 %and1 +} + +define i1 @test109(float %arg1, float %arg2, float %arg3) #0 { +; CHECK-LABEL: test109: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_cmp_ge_f32_e32 vcc_lo, v0, v2 +; CHECK-NEXT: v_cmp_ge_f32_e64 s0, v1, v2 +; CHECK-NEXT: s_and_b32 s0, vcc_lo, s0 +; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %cmp1 = fcmp oge float %arg1, %arg3 + %cmp2 = fcmp oge float %arg2, %arg3 + %and1 = and i1 %cmp1, %cmp2 + ret i1 %and1 +} + +define i1 @test110(double %arg1, double %arg2, double %arg3) #0 { +; CHECK-LABEL: test110: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5] +; CHECK-NEXT: v_cmp_nle_f64_e64 s0, v[2:3], v[4:5] +; CHECK-NEXT: s_or_b32 s0, vcc_lo, s0 +; CHECK-NEXT: v_cndmask_b32_e64 
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp ugt double %arg1, %arg3
+  %cmp2 = fcmp ugt double %arg2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test111(float %arg1, float %arg2, float %arg3) #0 {
+; CHECK-LABEL: test111:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_nlt_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_nlt_f32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp uge float %arg1, %arg3
+  %cmp2 = fcmp uge float %arg2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test112(float %arg1, float %arg2, float %arg3) #0 {
+; CHECK-LABEL: test112:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_ngt_f32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp ule float %arg1, %arg3
+  %cmp2 = fcmp ule float %arg2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test113(double %arg1, double %arg2, double %arg3) #0 {
+; CHECK-LABEL: test113:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %cmp1 = fcmp ult double %arg1, %arg3
+  %cmp2 = fcmp ult double %arg2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+; test114..test121 repeat test106..test113 with canonicalized inputs and
+; without attribute #0.
+define i1 @test114(float %arg1, float %arg2, float %arg3) {
+; CHECK-LABEL: test114:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; CHECK-NEXT:    v_cmp_lt_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_lt_f32_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call float @llvm.canonicalize.f32(float %arg1)
+  %var2 = call float @llvm.canonicalize.f32(float %arg2)
+  %cmp1 = fcmp olt float %var1, %arg3
+  %cmp2 = fcmp olt float %var2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test115(double %arg1, double %arg2, double %arg3) {
+; CHECK-LABEL: test115:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; CHECK-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; CHECK-NEXT:    v_cmp_le_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_le_f64_e64 s0, v[2:3], v[4:5]
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call double @llvm.canonicalize.f64(double %arg1)
+  %var2 = call double @llvm.canonicalize.f64(double %arg2)
+  %cmp1 = fcmp ole double %var1, %arg3
+  %cmp2 = fcmp ole double %var2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test116(double %arg1, double %arg2, double %arg3) {
+; CHECK-LABEL: test116:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; CHECK-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; CHECK-NEXT:    v_cmp_gt_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_gt_f64_e64 s0, v[2:3], v[4:5]
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call double @llvm.canonicalize.f64(double %arg1)
+  %var2 = call double @llvm.canonicalize.f64(double %arg2)
+  %cmp1 = fcmp ogt double %var1, %arg3
+  %cmp2 = fcmp ogt double %var2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test117(float %arg1, float %arg2, float %arg3) {
+; CHECK-LABEL: test117:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; CHECK-NEXT:    v_cmp_ge_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_ge_f32_e64 s0, v1, v2
+; CHECK-NEXT:    s_and_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call float @llvm.canonicalize.f32(float %arg1)
+  %var2 = call float @llvm.canonicalize.f32(float %arg2)
+  %cmp1 = fcmp oge float %var1, %arg3
+  %cmp2 = fcmp oge float %var2, %arg3
+  %and1 = and i1 %cmp1, %cmp2
+  ret i1 %and1
+}
+
+define i1 @test118(double %arg1, double %arg2, double %arg3) {
+; CHECK-LABEL: test118:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; CHECK-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; CHECK-NEXT:    v_cmp_nle_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_nle_f64_e64 s0, v[2:3], v[4:5]
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call double @llvm.canonicalize.f64(double %arg1)
+  %var2 = call double @llvm.canonicalize.f64(double %arg2)
+  %cmp1 = fcmp ugt double %var1, %arg3
+  %cmp2 = fcmp ugt double %var2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test119(float %arg1, float %arg2, float %arg3) {
+; CHECK-LABEL: test119:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; CHECK-NEXT:    v_cmp_nlt_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_nlt_f32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call float @llvm.canonicalize.f32(float %arg1)
+  %var2 = call float @llvm.canonicalize.f32(float %arg2)
+  %cmp1 = fcmp uge float %var1, %arg3
+  %cmp2 = fcmp uge float %var2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test120(float %arg1, float %arg2, float %arg3) {
+; CHECK-LABEL: test120:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; CHECK-NEXT:    v_cmp_ngt_f32_e32 vcc_lo, v0, v2
+; CHECK-NEXT:    v_cmp_ngt_f32_e64 s0, v1, v2
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call float @llvm.canonicalize.f32(float %arg1)
+  %var2 = call float @llvm.canonicalize.f32(float %arg2)
+  %cmp1 = fcmp ule float %var1, %arg3
+  %cmp2 = fcmp ule float %var2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+define i1 @test121(double %arg1, double %arg2, double %arg3) {
+; CHECK-LABEL: test121:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; CHECK-NEXT:    v_max_f64 v[2:3], v[2:3], v[2:3]
+; CHECK-NEXT:    v_cmp_nge_f64_e32 vcc_lo, v[0:1], v[4:5]
+; CHECK-NEXT:    v_cmp_nge_f64_e64 s0, v[2:3], v[4:5]
+; CHECK-NEXT:    s_or_b32 s0, vcc_lo, s0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %var1 = call double @llvm.canonicalize.f64(double %arg1)
+  %var2 = call double @llvm.canonicalize.f64(double %arg2)
+  %cmp1 = fcmp ult double %var1, %arg3
+  %cmp2 = fcmp ult double %var2, %arg3
+  %or1 = or i1 %cmp1, %cmp2
+  ret i1 %or1
+}
+
+declare double @llvm.canonicalize.f64(double)
+declare float @llvm.canonicalize.f32(float)
+declare half @llvm.canonicalize.f16(half)
+declare <2 x half> @llvm.canonicalize.v2f16(<2 x half>)
+
+attributes #0 = { nounwind "amdgpu-ieee"="false" }