diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -1714,14 +1714,19 @@
     unsigned Opc = 0;
     switch (CCCode) {
     default: llvm_unreachable("Don't know how to expand this condition!");
+    case ISD::SETUO:
+        if (TLI.isCondCodeLegal(ISD::SETUNE, OpVT)) {
+          CC1 = ISD::SETUNE; CC2 = ISD::SETUNE; Opc = ISD::OR;
+          break;
+        }
+        assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT)
+            && "If SETUO is expanded, SETOEQ or SETUNE must be legal!");
+        NeedInvert = true;
+        LLVM_FALLTHROUGH;
     case ISD::SETO:
         assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT)
             && "If SETO is expanded, SETOEQ must be legal!");
         CC1 = ISD::SETOEQ; CC2 = ISD::SETOEQ; Opc = ISD::AND; break;
-    case ISD::SETUO:
-        assert(TLI.isCondCodeLegal(ISD::SETUNE, OpVT)
-            && "If SETUO is expanded, SETUNE must be legal!");
-        CC1 = ISD::SETUNE; CC2 = ISD::SETUNE; Opc = ISD::OR; break;
     case ISD::SETOEQ:
     case ISD::SETOGT:
     case ISD::SETOGE:
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -186,7 +186,7 @@
   ISD::CondCode FPCCToExtend[] = {
       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
-      ISD::SETGE, ISD::SETNE};
+      ISD::SETGE, ISD::SETNE, ISD::SETO, ISD::SETUO};
 
   ISD::NodeType FPOpToExtend[] = {
       ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoD.td
@@ -299,23 +299,6 @@
 def : PatFpr64Fpr64;
 def : PatFpr64Fpr64;
 
-// Define pattern expansions for setcc operations which aren't directly
-// handled by a RISC-V instruction and aren't expanded in the SelectionDAG
-// Legalizer.
-
-def : Pat<(seto FPR64:$rs1, FPR64:$rs2),
-          (AND (FEQ_D FPR64:$rs1, FPR64:$rs1),
-               (FEQ_D FPR64:$rs2, FPR64:$rs2))>;
-def : Pat<(seto FPR64:$rs1, FPR64:$rs1),
-          (FEQ_D $rs1, $rs1)>;
-
-def : Pat<(setuo FPR64:$rs1, FPR64:$rs2),
-          (SLTIU (AND (FEQ_D FPR64:$rs1, FPR64:$rs1),
-                      (FEQ_D FPR64:$rs2, FPR64:$rs2)),
-                 1)>;
-def : Pat<(setuo FPR64:$rs1, FPR64:$rs1),
-          (SLTIU (FEQ_D $rs1, $rs1), 1)>;
-
 def Select_FPR64_Using_CC_GPR : SelectCC_rrirr;
 
 /// Loads
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoF.td
@@ -355,23 +355,6 @@
 def : PatFpr32Fpr32;
 def : PatFpr32Fpr32;
 
-// Define pattern expansions for setcc operations which aren't directly
-// handled by a RISC-V instruction and aren't expanded in the SelectionDAG
-// Legalizer.
-
-def : Pat<(seto FPR32:$rs1, FPR32:$rs2),
-          (AND (FEQ_S FPR32:$rs1, FPR32:$rs1),
-               (FEQ_S FPR32:$rs2, FPR32:$rs2))>;
-def : Pat<(seto FPR32:$rs1, FPR32:$rs1),
-          (FEQ_S $rs1, $rs1)>;
-
-def : Pat<(setuo FPR32:$rs1, FPR32:$rs2),
-          (SLTIU (AND (FEQ_S FPR32:$rs1, FPR32:$rs1),
-                      (FEQ_S FPR32:$rs2, FPR32:$rs2)),
-                 1)>;
-def : Pat<(setuo FPR32:$rs1, FPR32:$rs1),
-          (SLTIU (FEQ_S $rs1, $rs1), 1)>;
-
 def Select_FPR32_Using_CC_GPR : SelectCC_rrirr;
 
 /// Loads
diff --git a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
--- a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
@@ -421,7 +421,7 @@
 ; RV32IFD-NEXT:    feq.d a1, ft0, ft0
 ; RV32IFD-NEXT:    feq.d a2, ft1, ft1
 ; RV32IFD-NEXT:    and a1, a2, a1
-; RV32IFD-NEXT:    seqz a1, a1
+; RV32IFD-NEXT:    xori a1, a1, 1
 ; RV32IFD-NEXT:    or a0, a0, a1
 ; RV32IFD-NEXT:    bnez a0, .LBB9_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
@@ -441,7 +441,7 @@
 ; RV64IFD-NEXT:    feq.d a1, ft0, ft0
 ; RV64IFD-NEXT:    feq.d a2, ft1, ft1
 ; RV64IFD-NEXT:    and a1, a2, a1
-; RV64IFD-NEXT:    seqz a1, a1
+; RV64IFD-NEXT:    xori a1, a1, 1
 ; RV64IFD-NEXT:    or a0, a0, a1
 ; RV64IFD-NEXT:    bnez a0, .LBB9_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
@@ -699,8 +699,8 @@
 ; RV32IFD-NEXT:    feq.d a0, ft1, ft1
 ; RV32IFD-NEXT:    feq.d a1, ft0, ft0
 ; RV32IFD-NEXT:    and a0, a1, a0
-; RV32IFD-NEXT:    seqz a0, a0
-; RV32IFD-NEXT:    bnez a0, .LBB15_2
+; RV32IFD-NEXT:    addi a1, zero, 1
+; RV32IFD-NEXT:    bne a0, a1, .LBB15_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
 ; RV32IFD-NEXT:    lw ra, 12(sp)
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -717,8 +717,8 @@
 ; RV64IFD-NEXT:    feq.d a0, ft1, ft1
 ; RV64IFD-NEXT:    feq.d a1, ft0, ft0
 ; RV64IFD-NEXT:    and a0, a1, a0
-; RV64IFD-NEXT:    seqz a0, a0
-; RV64IFD-NEXT:    bnez a0, .LBB15_2
+; RV64IFD-NEXT:    addi a1, zero, 1
+; RV64IFD-NEXT:    bne a0, a1, .LBB15_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ld ra, 8(sp)
 ; RV64IFD-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/double-fcmp.ll b/llvm/test/CodeGen/RISCV/double-fcmp.ll
--- a/llvm/test/CodeGen/RISCV/double-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp.ll
@@ -222,7 +222,7 @@
 ; RV32IFD-NEXT:    feq.d a1, ft0, ft0
 ; RV32IFD-NEXT:    feq.d a2, ft1, ft1
 ; RV32IFD-NEXT:    and a1, a2, a1
-; RV32IFD-NEXT:    seqz a1, a1
+; RV32IFD-NEXT:    xori a1, a1, 1
 ; RV32IFD-NEXT:    or a0, a0, a1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
@@ -235,7 +235,7 @@
 ; RV64IFD-NEXT:    feq.d a1, ft0, ft0
 ; RV64IFD-NEXT:    feq.d a2, ft1, ft1
 ; RV64IFD-NEXT:    and a1, a2, a1
-; RV64IFD-NEXT:    seqz a1, a1
+; RV64IFD-NEXT:    xori a1, a1, 1
 ; RV64IFD-NEXT:    or a0, a0, a1
 ; RV64IFD-NEXT:    ret
   %1 = fcmp ueq double %a, %b
@@ -391,7 +391,7 @@
 ; RV32IFD-NEXT:    feq.d a0, ft1, ft1
 ; RV32IFD-NEXT:    feq.d a1, ft0, ft0
 ; RV32IFD-NEXT:    and a0, a1, a0
-; RV32IFD-NEXT:    seqz a0, a0
+; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
 ;
@@ -402,7 +402,7 @@
 ; RV64IFD-NEXT:    feq.d a0, ft1, ft1
 ; RV64IFD-NEXT:    feq.d a1, ft0, ft0
 ; RV64IFD-NEXT:    and a0, a1, a0
-; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
 ; RV64IFD-NEXT:    ret
   %1 = fcmp uno double %a, %b
   %2 = zext i1 %1 to i32
diff --git a/llvm/test/CodeGen/RISCV/double-isnan.ll b/llvm/test/CodeGen/RISCV/double-isnan.ll
--- a/llvm/test/CodeGen/RISCV/double-isnan.ll
+++ b/llvm/test/CodeGen/RISCV/double-isnan.ll
@@ -8,13 +8,13 @@
 ; RV32IFD-LABEL: double_is_nan:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    feq.d a0, fa0, fa0
-; RV32IFD-NEXT:    seqz a0, a0
+; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    ret
 ;
 ; RV64IFD-LABEL: double_is_nan:
 ; RV64IFD:       # %bb.0:
 ; RV64IFD-NEXT:    feq.d a0, fa0, fa0
-; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
 ; RV64IFD-NEXT:    ret
   %1 = fcmp uno double %a, 0.000000e+00
   ret i1 %1
diff --git a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
--- a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll
@@ -308,7 +308,7 @@
 ; RV32IFD-NEXT:    feq.d a1, ft0, ft0
 ; RV32IFD-NEXT:    feq.d a2, ft1, ft1
 ; RV32IFD-NEXT:    and a1, a2, a1
-; RV32IFD-NEXT:    seqz a1, a1
+; RV32IFD-NEXT:    xori a1, a1, 1
 ; RV32IFD-NEXT:    or a0, a0, a1
 ; RV32IFD-NEXT:    bnez a0, .LBB8_2
 ; RV32IFD-NEXT:  # %bb.1:
@@ -328,7 +328,7 @@
 ; RV64IFD-NEXT:    feq.d a1, ft1, ft1
 ; RV64IFD-NEXT:    feq.d a2, ft0, ft0
 ; RV64IFD-NEXT:    and a1, a2, a1
-; RV64IFD-NEXT:    seqz a1, a1
+; RV64IFD-NEXT:    xori a1, a1, 1
 ; RV64IFD-NEXT:    or a0, a0, a1
 ; RV64IFD-NEXT:    bnez a0, .LBB8_2
 ; RV64IFD-NEXT:  # %bb.1:
@@ -550,7 +550,7 @@
 ; RV32IFD-NEXT:    feq.d a0, ft1, ft1
 ; RV32IFD-NEXT:    feq.d a1, ft0, ft0
 ; RV32IFD-NEXT:    and a0, a1, a0
-; RV32IFD-NEXT:    seqz a0, a0
+; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    bnez a0, .LBB14_2
 ; RV32IFD-NEXT:  # %bb.1:
 ; RV32IFD-NEXT:    fmv.d ft0, ft1
@@ -568,7 +568,7 @@
 ; RV64IFD-NEXT:    feq.d a0, ft1, ft1
 ; RV64IFD-NEXT:    feq.d a1, ft0, ft0
 ; RV64IFD-NEXT:    and a0, a1, a0
-; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
 ; RV64IFD-NEXT:    bnez a0, .LBB14_2
 ; RV64IFD-NEXT:  # %bb.1:
 ; RV64IFD-NEXT:    fmv.d ft0, ft1
diff --git a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
--- a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
@@ -386,7 +386,7 @@
 ; RV32IF-NEXT:    feq.s a1, ft0, ft0
 ; RV32IF-NEXT:    feq.s a2, ft1, ft1
 ; RV32IF-NEXT:    and a1, a2, a1
-; RV32IF-NEXT:    seqz a1, a1
+; RV32IF-NEXT:    xori a1, a1, 1
 ; RV32IF-NEXT:    or a0, a0, a1
 ; RV32IF-NEXT:    bnez a0, .LBB9_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
@@ -406,7 +406,7 @@
 ; RV64IF-NEXT:    feq.s a1, ft0, ft0
 ; RV64IF-NEXT:    feq.s a2, ft1, ft1
 ; RV64IF-NEXT:    and a1, a2, a1
-; RV64IF-NEXT:    seqz a1, a1
+; RV64IF-NEXT:    xori a1, a1, 1
 ; RV64IF-NEXT:    or a0, a0, a1
 ; RV64IF-NEXT:    bnez a0, .LBB9_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
@@ -640,8 +640,8 @@
 ; RV32IF-NEXT:    feq.s a0, ft1, ft1
 ; RV32IF-NEXT:    feq.s a1, ft0, ft0
 ; RV32IF-NEXT:    and a0, a1, a0
-; RV32IF-NEXT:    seqz a0, a0
-; RV32IF-NEXT:    bnez a0, .LBB15_2
+; RV32IF-NEXT:    addi a1, zero, 1
+; RV32IF-NEXT:    bne a0, a1, .LBB15_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    lw ra, 12(sp)
 ; RV32IF-NEXT:    addi sp, sp, 16
@@ -658,8 +658,8 @@
 ; RV64IF-NEXT:    feq.s a0, ft1, ft1
 ; RV64IF-NEXT:    feq.s a1, ft0, ft0
 ; RV64IF-NEXT:    and a0, a1, a0
-; RV64IF-NEXT:    seqz a0, a0
-; RV64IF-NEXT:    bnez a0, .LBB15_2
+; RV64IF-NEXT:    addi a1, zero, 1
+; RV64IF-NEXT:    bne a0, a1, .LBB15_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ld ra, 8(sp)
 ; RV64IF-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/float-fcmp.ll b/llvm/test/CodeGen/RISCV/float-fcmp.ll
--- a/llvm/test/CodeGen/RISCV/float-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-fcmp.ll
@@ -175,7 +175,7 @@
 ; RV32IF-NEXT:    feq.s a1, ft0, ft0
 ; RV32IF-NEXT:    feq.s a2, ft1, ft1
 ; RV32IF-NEXT:    and a1, a2, a1
-; RV32IF-NEXT:    seqz a1, a1
+; RV32IF-NEXT:    xori a1, a1, 1
 ; RV32IF-NEXT:    or a0, a0, a1
 ; RV32IF-NEXT:    ret
 ;
@@ -187,7 +187,7 @@
 ; RV64IF-NEXT:    feq.s a1, ft0, ft0
 ; RV64IF-NEXT:    feq.s a2, ft1, ft1
 ; RV64IF-NEXT:    and a1, a2, a1
-; RV64IF-NEXT:    seqz a1, a1
+; RV64IF-NEXT:    xori a1, a1, 1
 ; RV64IF-NEXT:    or a0, a0, a1
 ; RV64IF-NEXT:    ret
   %1 = fcmp ueq float %a, %b
@@ -308,7 +308,7 @@
 ; RV32IF-NEXT:    feq.s a0, ft1, ft1
 ; RV32IF-NEXT:    feq.s a1, ft0, ft0
 ; RV32IF-NEXT:    and a0, a1, a0
-; RV32IF-NEXT:    seqz a0, a0
+; RV32IF-NEXT:    xori a0, a0, 1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: fcmp_uno:
@@ -318,7 +318,7 @@
 ; RV64IF-NEXT:    feq.s a0, ft1, ft1
 ; RV64IF-NEXT:    feq.s a1, ft0, ft0
 ; RV64IF-NEXT:    and a0, a1, a0
-; RV64IF-NEXT:    seqz a0, a0
+; RV64IF-NEXT:    xori a0, a0, 1
 ; RV64IF-NEXT:    ret
   %1 = fcmp uno float %a, %b
   %2 = zext i1 %1 to i32
diff --git a/llvm/test/CodeGen/RISCV/float-isnan.ll b/llvm/test/CodeGen/RISCV/float-isnan.ll
--- a/llvm/test/CodeGen/RISCV/float-isnan.ll
+++ b/llvm/test/CodeGen/RISCV/float-isnan.ll
@@ -8,13 +8,13 @@
 ; RV32IF-LABEL: float_is_nan:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    feq.s a0, fa0, fa0
-; RV32IF-NEXT:    seqz a0, a0
+; RV32IF-NEXT:    xori a0, a0, 1
 ; RV32IF-NEXT:    ret
 ;
 ; RV64IF-LABEL: float_is_nan:
 ; RV64IF:       # %bb.0:
 ; RV64IF-NEXT:    feq.s a0, fa0, fa0
-; RV64IF-NEXT:    seqz a0, a0
+; RV64IF-NEXT:    xori a0, a0, 1
 ; RV64IF-NEXT:    ret
   %1 = fcmp uno float %a, 0.000000e+00
   ret i1 %1
diff --git a/llvm/test/CodeGen/RISCV/float-select-fcmp.ll b/llvm/test/CodeGen/RISCV/float-select-fcmp.ll
--- a/llvm/test/CodeGen/RISCV/float-select-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-select-fcmp.ll
@@ -246,7 +246,7 @@
 ; RV32IF-NEXT:    feq.s a1, ft1, ft1
 ; RV32IF-NEXT:    feq.s a2, ft0, ft0
 ; RV32IF-NEXT:    and a1, a2, a1
-; RV32IF-NEXT:    seqz a1, a1
+; RV32IF-NEXT:    xori a1, a1, 1
 ; RV32IF-NEXT:    or a0, a0, a1
 ; RV32IF-NEXT:    bnez a0, .LBB8_2
 ; RV32IF-NEXT:  # %bb.1:
@@ -263,7 +263,7 @@
 ; RV64IF-NEXT:    feq.s a1, ft1, ft1
 ; RV64IF-NEXT:    feq.s a2, ft0, ft0
 ; RV64IF-NEXT:    and a1, a2, a1
-; RV64IF-NEXT:    seqz a1, a1
+; RV64IF-NEXT:    xori a1, a1, 1
 ; RV64IF-NEXT:    or a0, a0, a1
 ; RV64IF-NEXT:    bnez a0, .LBB8_2
 ; RV64IF-NEXT:  # %bb.1:
@@ -440,7 +440,7 @@
 ; RV32IF-NEXT:    feq.s a0, ft1, ft1
 ; RV32IF-NEXT:    feq.s a1, ft0, ft0
 ; RV32IF-NEXT:    and a0, a1, a0
-; RV32IF-NEXT:    seqz a0, a0
+; RV32IF-NEXT:    xori a0, a0, 1
 ; RV32IF-NEXT:    bnez a0, .LBB14_2
 ; RV32IF-NEXT:  # %bb.1:
 ; RV32IF-NEXT:    fmv.s ft0, ft1
@@ -455,7 +455,7 @@
 ; RV64IF-NEXT:    feq.s a0, ft1, ft1
 ; RV64IF-NEXT:    feq.s a1, ft0, ft0
 ; RV64IF-NEXT:    and a0, a1, a0
-; RV64IF-NEXT:    seqz a0, a0
+; RV64IF-NEXT:    xori a0, a0, 1
 ; RV64IF-NEXT:    bnez a0, .LBB14_2
 ; RV64IF-NEXT:  # %bb.1:
 ; RV64IF-NEXT:    fmv.s ft0, ft1