diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8429,8 +8429,20 @@
   if (!N0.hasOneUse() || !N1.hasOneUse())
     return SDValue();
 
-  // RHS of both xors needs to be 1.
-  if (!isOneConstant(N0.getOperand(1)) || !isOneConstant(N1.getOperand(1)))
+  SDValue N01 = N0.getOperand(1);
+  SDValue N11 = N1.getOperand(1);
+
+  // For AND, SimplifyDemandedBits may have turned one of the (xor X, 1) into
+  // (xor X, -1) based on the upper bits of the other operand being 0. If the
+  // operation is And, allow one of the Xors to use -1.
+  if (isOneConstant(N01)) {
+    if (!isOneConstant(N11) && !(IsAnd && isAllOnesConstant(N11)))
+      return SDValue();
+  } else if (isOneConstant(N11)) {
+    // N01 and N11 being 1 was already handled. Handle N11==1 and N01==-1.
+    if (!(IsAnd && isAllOnesConstant(N01)))
+      return SDValue();
+  } else
     return SDValue();
 
   EVT VT = N->getValueType(0);
diff --git a/llvm/test/CodeGen/RISCV/double-previous-failure.ll b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
--- a/llvm/test/CodeGen/RISCV/double-previous-failure.ll
+++ b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
@@ -28,15 +28,12 @@
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI1_1)
 ; RV32IFD-NEXT:    fld ft2, %lo(.LCPI1_1)(a0)
 ; RV32IFD-NEXT:    flt.d a0, ft0, ft1
-; RV32IFD-NEXT:    not a0, a0
 ; RV32IFD-NEXT:    flt.d a1, ft2, ft0
-; RV32IFD-NEXT:    xori a1, a1, 1
-; RV32IFD-NEXT:    and a0, a0, a1
-; RV32IFD-NEXT:    bnez a0, .LBB1_2
+; RV32IFD-NEXT:    or a0, a0, a1
+; RV32IFD-NEXT:    beqz a0, .LBB1_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.then
 ; RV32IFD-NEXT:    call abort@plt
 ; RV32IFD-NEXT:  .LBB1_2: # %if.end
-; RV32IFD-NEXT:    li a0, 0
 ; RV32IFD-NEXT:    call exit@plt
 entry:
   %call = call double @test(double 2.000000e+00)
diff --git a/llvm/test/CodeGen/RISCV/setcc-logic.ll b/llvm/test/CodeGen/RISCV/setcc-logic.ll
--- a/llvm/test/CodeGen/RISCV/setcc-logic.ll
+++ b/llvm/test/CodeGen/RISCV/setcc-logic.ll
@@ -123,19 +123,17 @@
 ; RV32I-LABEL: and_icmp_sge:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a0, a1
-; RV32I-NEXT:    not a0, a0
 ; RV32I-NEXT:    slt a1, a2, a3
-; RV32I-NEXT:    xori a1, a1, 1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: and_icmp_sge:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slt a0, a0, a1
-; RV64I-NEXT:    not a0, a0
 ; RV64I-NEXT:    slt a1, a2, a3
-; RV64I-NEXT:    xori a1, a1, 1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ret
   %cmp1 = icmp sge i32 %a, %b
   %cmp2 = icmp sge i32 %c, %d
@@ -147,19 +145,17 @@
 ; RV32I-LABEL: and_icmp_sle:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    slt a0, a1, a0
-; RV32I-NEXT:    not a0, a0
 ; RV32I-NEXT:    slt a1, a3, a2
-; RV32I-NEXT:    xori a1, a1, 1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: and_icmp_sle:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    slt a0, a1, a0
-; RV64I-NEXT:    not a0, a0
 ; RV64I-NEXT:    slt a1, a3, a2
-; RV64I-NEXT:    xori a1, a1, 1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ret
   %cmp1 = icmp sle i32 %a, %b
   %cmp2 = icmp sle i32 %c, %d
@@ -171,19 +167,17 @@
 ; RV32I-LABEL: and_icmp_uge:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a0, a1
-; RV32I-NEXT:    not a0, a0
 ; RV32I-NEXT:    sltu a1, a2, a3
-; RV32I-NEXT:    xori a1, a1, 1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: and_icmp_uge:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sltu a0, a0, a1
-; RV64I-NEXT:    not a0, a0
 ; RV64I-NEXT:    sltu a1, a2, a3
-; RV64I-NEXT:    xori a1, a1, 1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ret
   %cmp1 = icmp uge i32 %a, %b
   %cmp2 = icmp uge i32 %c, %d
@@ -195,19 +189,17 @@
 ; RV32I-LABEL: and_icmp_ule:
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    sltu a0, a1, a0
-; RV32I-NEXT:    not a0, a0
 ; RV32I-NEXT:    sltu a1, a3, a2
-; RV32I-NEXT:    xori a1, a1, 1
-; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    xori a0, a0, 1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: and_icmp_ule:
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    sltu a0, a1, a0
-; RV64I-NEXT:    not a0, a0
 ; RV64I-NEXT:    sltu a1, a3, a2
-; RV64I-NEXT:    xori a1, a1, 1
-; RV64I-NEXT:    and a0, a0, a1
+; RV64I-NEXT:    or a0, a0, a1
+; RV64I-NEXT:    xori a0, a0, 1
 ; RV64I-NEXT:    ret
   %cmp1 = icmp ule i32 %a, %b
   %cmp2 = icmp ule i32 %c, %d
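
Note for reviewers: the standalone program below is an illustrative sanity check, not part of the patch; the file name and the check itself are hypothetical. It exhaustively verifies, for the 0/1 values a setcc produces, the De Morgan fold the combine performs, (and (xor X, 1), (xor Y, 1)) == (xor (or X, Y), 1), and that the fold remains sound in the new case where SimplifyDemandedBits has widened one mask to -1, which is exactly why the tests above lose the not/xori/and sequence in favor of or+xori.

  // demorgan_check.cpp (hypothetical, illustrative only)
  #include <cassert>

  int main() {
    for (int x = 0; x <= 1; ++x) {
      for (int y = 0; y <= 1; ++y) {
        // Original pattern: (and (xor X, 1), (xor Y, 1)) -> (xor (or X, Y), 1).
        assert(((x ^ 1) & (y ^ 1)) == ((x | y) ^ 1));
        // New case handled by this patch: one operand became (xor Y, -1).
        // Because X ^ 1 has all upper bits clear when X is 0 or 1, the AND
        // masks them off and the result is unchanged.
        assert(((x ^ 1) & (y ^ -1)) == ((x | y) ^ 1));
      }
    }
    return 0;
  }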