Index: lib/Target/AMDGPU/SIISelLowering.cpp =================================================================== --- lib/Target/AMDGPU/SIISelLowering.cpp +++ lib/Target/AMDGPU/SIISelLowering.cpp @@ -6471,6 +6471,29 @@ } } + if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS) + std::swap(LHS, RHS); + + if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS && + RHS.hasOneUse()) { + ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); + // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(p_nan | n_nan) + // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (p_nan | n_nan) + const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1)); + if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask && + (RHS.getOperand(0) == LHS.getOperand(0) && + LHS.getOperand(0) == LHS.getOperand(1))) { + const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN; + unsigned NewMask = LCC == ISD::SETO ? + Mask->getZExtValue() & ~OrdMask : + Mask->getZExtValue() & OrdMask; + + SDLoc DL(N); + return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0), + DAG.getConstant(NewMask, DL, MVT::i32)); + } + } + if (VT == MVT::i32 && (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) { // and x, (sext cc from i1) => select cc, x, 0 Index: test/CodeGen/AMDGPU/fp-classify.ll =================================================================== --- test/CodeGen/AMDGPU/fp-classify.ll +++ test/CodeGen/AMDGPU/fp-classify.ll @@ -1,5 +1,5 @@ -; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s -; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s +; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s +; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s declare float 
@llvm.fabs.f32(float) #1 declare double @llvm.fabs.f64(double) #1 @@ -124,14 +124,11 @@ ret void } -; Wrong unordered compare ; GCN-LABEL: {{^}}test_isfinite_pattern_4: ; GCN-DAG: s_load_dword [[X:s[0-9]+]] ; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x1f8 -; GCN-DAG: v_cmp_o_f32_e64 [[ORD:s\[[0-9]+:[0-9]+\]]], [[X]], [[X]] ; GCN-DAG: v_cmp_class_f32_e32 vcc, [[X]], [[K]] -; GCN: s_and_b64 [[AND:s\[[0-9]+:[0-9]+\]]], [[ORD]], vcc -; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[AND]] +; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc define amdgpu_kernel void @test_isfinite_pattern_4(i32 addrspace(1)* nocapture %out, float %x) #0 { %ord = fcmp ord float %x, 0.000000e+00 %x.fabs = tail call float @llvm.fabs.f32(float %x) #1 @@ -142,5 +139,46 @@ ret void } +; GCN-LABEL: {{^}}test_isfinite_pattern_4_commute_and: +; GCN-DAG: s_load_dword [[X:s[0-9]+]] +; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x1f8 +; GCN-DAG: v_cmp_class_f32_e32 vcc, [[X]], [[K]] +; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc +define amdgpu_kernel void @test_isfinite_pattern_4_commute_and(i32 addrspace(1)* nocapture %out, float %x) #0 { + %ord = fcmp ord float %x, 0.000000e+00 + %x.fabs = tail call float @llvm.fabs.f32(float %x) #1 + %ninf = fcmp one float %x.fabs, 0x7FF0000000000000 + %and = and i1 %ninf, %ord + %ext = zext i1 %and to i32 + store i32 %ext, i32 addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}test_not_isfinite_pattern_4_wrong_ord_test: +; GCN-DAG: s_load_dword [[X:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0xb|0x2c}} +; GCN-DAG: s_load_dword [[Y:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0x14|0x50}} + +; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x1f8 +; GCN-DAG: v_mov_b32_e32 [[VY:v[0-9]+]], [[Y]] + +; SI-DAG: v_cmp_o_f32_e32 vcc, [[X]], [[VY]] +; SI-DAG: v_cmp_class_f32_e64 [[CLASS:s\[[0-9]+:[0-9]+\]]], [[X]], [[K]] +; SI: s_and_b64 [[AND:s\[[0-9]+:[0-9]+\]]], vcc, [[CLASS]] + +; VI-DAG: v_cmp_o_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[X]], [[VY]] +; VI-DAG: v_cmp_class_f32_e32 vcc, [[X]], 
[[K]] +; VI: s_and_b64 [[AND:s\[[0-9]+:[0-9]+\]]], [[CMP]], vcc + +; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[AND]] +define amdgpu_kernel void @test_not_isfinite_pattern_4_wrong_ord_test(i32 addrspace(1)* nocapture %out, float %x, [8 x i32], float %y) #0 { + %ord = fcmp ord float %x, %y + %x.fabs = tail call float @llvm.fabs.f32(float %x) #1 + %ninf = fcmp one float %x.fabs, 0x7FF0000000000000 + %and = and i1 %ord, %ninf + %ext = zext i1 %and to i32 + store i32 %ext, i32 addrspace(1)* %out, align 4 + ret void +} + attributes #0 = { nounwind } attributes #1 = { nounwind readnone } Index: test/CodeGen/AMDGPU/llvm.amdgcn.class.ll =================================================================== --- test/CodeGen/AMDGPU/llvm.amdgcn.class.ll +++ test/CodeGen/AMDGPU/llvm.amdgcn.class.ll @@ -507,5 +507,42 @@ ret void } +; SI-LABEL: {{^}}test_fold_and_ord: +; SI: s_waitcnt +; SI-NEXT: v_cmp_class_f32_e64 s[6:7], v0, 32{{$}} +; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[6:7] +; SI-NEXT: s_setpc_b64 +define i1 @test_fold_and_ord(float %a) { + %class = call i1 @llvm.amdgcn.class.f32(float %a, i32 35) #1 + %ord = fcmp ord float %a, %a + %and = and i1 %ord, %class + ret i1 %and +} + +; SI-LABEL: {{^}}test_fold_and_unord: +; SI: s_waitcnt +; SI-NEXT: v_cmp_class_f32_e64 s[6:7], v0, 3{{$}} +; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[6:7] +; SI-NEXT: s_setpc_b64 +define i1 @test_fold_and_unord(float %a) { + %class = call i1 @llvm.amdgcn.class.f32(float %a, i32 35) #1 + %ord = fcmp uno float %a, %a + %and = and i1 %ord, %class + ret i1 %and +} + +; SI-LABEL: {{^}}test_fold_and_ord_multi_use: +; SI: v_cmp_class +; SI-NOT: v_cmp_class +; SI: v_cmp_o +; SI: s_and_b64 +define i1 @test_fold_and_ord_multi_use(float %a) { + %class = call i1 @llvm.amdgcn.class.f32(float %a, i32 35) #1 + store volatile i1 %class, i1 addrspace(1)* undef + %ord = fcmp ord float %a, %a + %and = and i1 %ord, %class + ret i1 %and +} + attributes #0 = { nounwind } attributes #1 = { nounwind readnone }