diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -966,9 +966,13 @@
 def : BccSwapPat<setugt, BLTU>;
 def : BccSwapPat<setule, BGEU>;
 
-// An extra pattern is needed for a brcond without a setcc (i.e. where the
+// Extra patterns are needed for a brcond without a setcc (i.e. where the
 // condition was calculated elsewhere).
 def : Pat<(brcond GPR:$cond, bb:$imm12), (BNE GPR:$cond, X0, bb:$imm12)>;
+// In this pattern, the `(xor $cond, 1)` functions like (boolean) `not`, as the
+// `brcond` only uses the lowest bit.
+def : Pat<(brcond (XLenVT (xor GPR:$cond, 1)), bb:$imm12),
+          (BEQ GPR:$cond, X0, bb:$imm12)>;
 
 let isBarrier = 1, isBranch = 1, isTerminator = 1 in
 def PseudoBR : Pseudo<(outs), (ins simm21_lsb0_jal:$imm20), [(br bb:$imm20)]>,
diff --git a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
--- a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
@@ -100,8 +100,7 @@
 ; RV32IFD-NEXT:    sw a1, 4(sp)
 ; RV32IFD-NEXT:    fld ft1, 0(sp)
 ; RV32IFD-NEXT:    feq.d a0, ft1, ft0
-; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    beqz a0, .LBB2_2
+; RV32IFD-NEXT:    bnez a0, .LBB2_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -116,8 +115,7 @@
 ; RV64IFD-NEXT:    fmv.d.x ft0, a1
 ; RV64IFD-NEXT:    fmv.d.x ft1, a0
 ; RV64IFD-NEXT:    feq.d a0, ft1, ft0
-; RV64IFD-NEXT:    xori a0, a0, 1
-; RV64IFD-NEXT:    beqz a0, .LBB2_2
+; RV64IFD-NEXT:    bnez a0, .LBB2_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -460,8 +458,7 @@
 ; RV32IFD-NEXT:    sw a1, 4(sp)
 ; RV32IFD-NEXT:    fld ft1, 0(sp)
 ; RV32IFD-NEXT:    fle.d a0, ft1, ft0
-; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    bnez a0, .LBB10_2
+; RV32IFD-NEXT:    beqz a0, .LBB10_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -476,8 +473,7 @@
 ; RV64IFD-NEXT:    fmv.d.x ft0, a1
 ; RV64IFD-NEXT:    fmv.d.x ft1, a0
 ; RV64IFD-NEXT:    fle.d a0, ft1, ft0
-; RV64IFD-NEXT:    xori a0, a0, 1
-; RV64IFD-NEXT:    bnez a0, .LBB10_2
+; RV64IFD-NEXT:    beqz a0, .LBB10_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -505,8 +501,7 @@
 ; RV32IFD-NEXT:    sw a1, 4(sp)
 ; RV32IFD-NEXT:    fld ft1, 0(sp)
 ; RV32IFD-NEXT:    flt.d a0, ft1, ft0
-; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    bnez a0, .LBB11_2
+; RV32IFD-NEXT:    beqz a0, .LBB11_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -521,8 +516,7 @@
 ; RV64IFD-NEXT:    fmv.d.x ft0, a1
 ; RV64IFD-NEXT:    fmv.d.x ft1, a0
 ; RV64IFD-NEXT:    flt.d a0, ft1, ft0
-; RV64IFD-NEXT:    xori a0, a0, 1
-; RV64IFD-NEXT:    bnez a0, .LBB11_2
+; RV64IFD-NEXT:    beqz a0, .LBB11_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -550,8 +544,7 @@
 ; RV32IFD-NEXT:    sw a3, 4(sp)
 ; RV32IFD-NEXT:    fld ft1, 0(sp)
 ; RV32IFD-NEXT:    fle.d a0, ft1, ft0
-; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    bnez a0, .LBB12_2
+; RV32IFD-NEXT:    beqz a0, .LBB12_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -566,8 +559,7 @@
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    fmv.d.x ft1, a1
 ; RV64IFD-NEXT:    fle.d a0, ft1, ft0
-; RV64IFD-NEXT:    xori a0, a0, 1
-; RV64IFD-NEXT:    bnez a0, .LBB12_2
+; RV64IFD-NEXT:    beqz a0, .LBB12_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -595,8 +587,7 @@
 ; RV32IFD-NEXT:    sw a3, 4(sp)
 ; RV32IFD-NEXT:    fld ft1, 0(sp)
 ; RV32IFD-NEXT:    flt.d a0, ft1, ft0
-; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    bnez a0, .LBB13_2
+; RV32IFD-NEXT:    beqz a0, .LBB13_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -611,8 +602,7 @@
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    fmv.d.x ft1, a1
 ; RV64IFD-NEXT:    flt.d a0, ft1, ft0
-; RV64IFD-NEXT:    xori a0, a0, 1
-; RV64IFD-NEXT:    bnez a0, .LBB13_2
+; RV64IFD-NEXT:    beqz a0, .LBB13_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -640,8 +630,7 @@
 ; RV32IFD-NEXT:    sw a1, 4(sp)
 ; RV32IFD-NEXT:    fld ft1, 0(sp)
 ; RV32IFD-NEXT:    feq.d a0, ft1, ft0
-; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    bnez a0, .LBB14_2
+; RV32IFD-NEXT:    beqz a0, .LBB14_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -656,8 +645,7 @@
 ; RV64IFD-NEXT:    fmv.d.x ft0, a1
 ; RV64IFD-NEXT:    fmv.d.x ft1, a0
 ; RV64IFD-NEXT:    feq.d a0, ft1, ft0
-; RV64IFD-NEXT:    xori a0, a0, 1
-; RV64IFD-NEXT:    bnez a0, .LBB14_2
+; RV64IFD-NEXT:    beqz a0, .LBB14_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
--- a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
@@ -93,8 +93,7 @@
 ; RV32IF-NEXT:    fmv.w.x ft0, a1
 ; RV32IF-NEXT:    fmv.w.x ft1, a0
 ; RV32IF-NEXT:    feq.s a0, ft1, ft0
-; RV32IF-NEXT:    xori a0, a0, 1
-; RV32IF-NEXT:    beqz a0, .LBB2_2
+; RV32IF-NEXT:    bnez a0, .LBB2_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
@@ -109,8 +108,7 @@
 ; RV64IF-NEXT:    fmv.w.x ft0, a1
 ; RV64IF-NEXT:    fmv.w.x ft1, a0
 ; RV64IF-NEXT:    feq.s a0, ft1, ft0
-; RV64IF-NEXT:    xori a0, a0, 1
-; RV64IF-NEXT:    beqz a0, .LBB2_2
+; RV64IF-NEXT:    bnez a0, .LBB2_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
@@ -421,8 +419,7 @@
 ; RV32IF-NEXT:    fmv.w.x ft0, a1
 ; RV32IF-NEXT:    fmv.w.x ft1, a0
 ; RV32IF-NEXT:    fle.s a0, ft1, ft0
-; RV32IF-NEXT:    xori a0, a0, 1
-; RV32IF-NEXT:    bnez a0, .LBB10_2
+; RV32IF-NEXT:    beqz a0, .LBB10_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
@@ -437,8 +434,7 @@
 ; RV64IF-NEXT:    fmv.w.x ft0, a1
 ; RV64IF-NEXT:    fmv.w.x ft1, a0
 ; RV64IF-NEXT:    fle.s a0, ft1, ft0
-; RV64IF-NEXT:    xori a0, a0, 1
-; RV64IF-NEXT:    bnez a0, .LBB10_2
+; RV64IF-NEXT:    beqz a0, .LBB10_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
@@ -462,8 +458,7 @@
 ; RV32IF-NEXT:    fmv.w.x ft0, a1
 ; RV32IF-NEXT:    fmv.w.x ft1, a0
 ; RV32IF-NEXT:    flt.s a0, ft1, ft0
-; RV32IF-NEXT:    xori a0, a0, 1
-; RV32IF-NEXT:    bnez a0, .LBB11_2
+; RV32IF-NEXT:    beqz a0, .LBB11_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
@@ -478,8 +473,7 @@
 ; RV64IF-NEXT:    fmv.w.x ft0, a1
 ; RV64IF-NEXT:    fmv.w.x ft1, a0
 ; RV64IF-NEXT:    flt.s a0, ft1, ft0
-; RV64IF-NEXT:    xori a0, a0, 1
-; RV64IF-NEXT:    bnez a0, .LBB11_2
+; RV64IF-NEXT:    beqz a0, .LBB11_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
@@ -503,8 +497,7 @@
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fmv.w.x ft1, a1
 ; RV32IF-NEXT:    fle.s a0, ft1, ft0
-; RV32IF-NEXT:    xori a0, a0, 1
-; RV32IF-NEXT:    bnez a0, .LBB12_2
+; RV32IF-NEXT:    beqz a0, .LBB12_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
@@ -519,8 +512,7 @@
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
 ; RV64IF-NEXT:    fmv.w.x ft1, a1
 ; RV64IF-NEXT:    fle.s a0, ft1, ft0
-; RV64IF-NEXT:    xori a0, a0, 1
-; RV64IF-NEXT:    bnez a0, .LBB12_2
+; RV64IF-NEXT:    beqz a0, .LBB12_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
@@ -544,8 +536,7 @@
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fmv.w.x ft1, a1
 ; RV32IF-NEXT:    flt.s a0, ft1, ft0
-; RV32IF-NEXT:    xori a0, a0, 1
-; RV32IF-NEXT:    bnez a0, .LBB13_2
+; RV32IF-NEXT:    beqz a0, .LBB13_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
@@ -560,8 +551,7 @@
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
 ; RV64IF-NEXT:    fmv.w.x ft1, a1
 ; RV64IF-NEXT:    flt.s a0, ft1, ft0
-; RV64IF-NEXT:    xori a0, a0, 1
-; RV64IF-NEXT:    bnez a0, .LBB13_2
+; RV64IF-NEXT:    beqz a0, .LBB13_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
@@ -585,8 +575,7 @@
 ; RV32IF-NEXT:    fmv.w.x ft0, a1
 ; RV32IF-NEXT:    fmv.w.x ft1, a0
 ; RV32IF-NEXT:    feq.s a0, ft1, ft0
-; RV32IF-NEXT:    xori a0, a0, 1
-; RV32IF-NEXT:    bnez a0, .LBB14_2
+; RV32IF-NEXT:    beqz a0, .LBB14_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
@@ -601,8 +590,7 @@
 ; RV64IF-NEXT:    fmv.w.x ft0, a1
 ; RV64IF-NEXT:    fmv.w.x ft1, a0
 ; RV64IF-NEXT:    feq.s a0, ft1, ft0
-; RV64IF-NEXT:    xori a0, a0, 1
-; RV64IF-NEXT:    bnez a0, .LBB14_2
+; RV64IF-NEXT:    beqz a0, .LBB14_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
--- a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
@@ -87,8 +87,7 @@
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    feq.h a0, fa0, fa1
-; RV32IZFH-NEXT:    xori a0, a0, 1
-; RV32IZFH-NEXT:    beqz a0, .LBB2_2
+; RV32IZFH-NEXT:    bnez a0, .LBB2_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -101,8 +100,7 @@
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    feq.h a0, fa0, fa1
-; RV64IZFH-NEXT:    xori a0, a0, 1
-; RV64IZFH-NEXT:    beqz a0, .LBB2_2
+; RV64IZFH-NEXT:    bnez a0, .LBB2_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -383,8 +381,7 @@
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fle.h a0, fa0, fa1
-; RV32IZFH-NEXT:    xori a0, a0, 1
-; RV32IZFH-NEXT:    bnez a0, .LBB10_2
+; RV32IZFH-NEXT:    beqz a0, .LBB10_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -397,8 +394,7 @@
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fle.h a0, fa0, fa1
-; RV64IZFH-NEXT:    xori a0, a0, 1
-; RV64IZFH-NEXT:    bnez a0, .LBB10_2
+; RV64IZFH-NEXT:    beqz a0, .LBB10_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -420,8 +416,7 @@
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    flt.h a0, fa0, fa1
-; RV32IZFH-NEXT:    xori a0, a0, 1
-; RV32IZFH-NEXT:    bnez a0, .LBB11_2
+; RV32IZFH-NEXT:    beqz a0, .LBB11_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -434,8 +429,7 @@
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    flt.h a0, fa0, fa1
-; RV64IZFH-NEXT:    xori a0, a0, 1
-; RV64IZFH-NEXT:    bnez a0, .LBB11_2
+; RV64IZFH-NEXT:    beqz a0, .LBB11_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -457,8 +451,7 @@
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fle.h a0, fa1, fa0
-; RV32IZFH-NEXT:    xori a0, a0, 1
-; RV32IZFH-NEXT:    bnez a0, .LBB12_2
+; RV32IZFH-NEXT:    beqz a0, .LBB12_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -471,8 +464,7 @@
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fle.h a0, fa1, fa0
-; RV64IZFH-NEXT:    xori a0, a0, 1
-; RV64IZFH-NEXT:    bnez a0, .LBB12_2
+; RV64IZFH-NEXT:    beqz a0, .LBB12_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -494,8 +486,7 @@
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    flt.h a0, fa1, fa0
-; RV32IZFH-NEXT:    xori a0, a0, 1
-; RV32IZFH-NEXT:    bnez a0, .LBB13_2
+; RV32IZFH-NEXT:    beqz a0, .LBB13_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -508,8 +499,7 @@
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    flt.h a0, fa1, fa0
-; RV64IZFH-NEXT:    xori a0, a0, 1
-; RV64IZFH-NEXT:    bnez a0, .LBB13_2
+; RV64IZFH-NEXT:    beqz a0, .LBB13_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -531,8 +521,7 @@
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    feq.h a0, fa0, fa1
-; RV32IZFH-NEXT:    xori a0, a0, 1
-; RV32IZFH-NEXT:    bnez a0, .LBB14_2
+; RV32IZFH-NEXT:    beqz a0, .LBB14_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -545,8 +534,7 @@
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    feq.h a0, fa0, fa1
-; RV64IZFH-NEXT:    xori a0, a0, 1
-; RV64IZFH-NEXT:    bnez a0, .LBB14_2
+; RV64IZFH-NEXT:    beqz a0, .LBB14_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
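
For illustration only (not part of the patch; the function below is a hypothetical sketch mirroring the updated tests, and @abort is declared as in those tests): an unordered comparison such as `fcmp ugt` has no direct RISC-V compare instruction, so it is lowered to the ordered `fle` compare followed by an `(xor ..., 1)` inversion. The new `(brcond (xor GPR:$cond, 1), ...)` pattern folds that inversion into the branch, which is why the `xori a0, a0, 1` + `bnez` pairs above collapse into a single `beqz`.

; Hypothetical sketch -- not one of the updated test functions. With this
; patch the branch lowers to
;   fle.d a0, ft1, ft0
;   beqz  a0, .LBB0_2
; instead of
;   fle.d a0, ft1, ft0
;   xori  a0, a0, 1
;   bnez  a0, .LBB0_2
define void @br_fcmp_ugt_sketch(double %a, double %b) nounwind {
  ; ugt is lowered as (xor (setole %a, %b), 1); the brcond on that xor
  ; now matches the new BEQ pattern.
  %cmp = fcmp ugt double %a, %b
  br i1 %cmp, label %if.then, label %if.else
if.else:
  ret void
if.then:
  tail call void @abort()
  unreachable
}
declare void @abort()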