diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -315,7 +315,7 @@ bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override; bool isCheapToSpeculateCttz() const override; bool isCheapToSpeculateCtlz() const override; - bool hasAndNot(SDValue Y) const override; + bool hasAndNotCompare(SDValue Y) const override; bool shouldSinkOperands(Instruction *I, SmallVectorImpl<Use *> &Ops) const override; bool isFPImmLegal(const APFloat &Imm, EVT VT, diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -1166,7 +1166,7 @@ return Subtarget.hasStdExtZbb(); } -bool RISCVTargetLowering::hasAndNot(SDValue Y) const { +bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const { EVT VT = Y.getValueType(); // FIXME: Support vectors once we have tests. 
diff --git a/llvm/test/CodeGen/RISCV/rv32zbb-zbp.ll b/llvm/test/CodeGen/RISCV/rv32zbb-zbp.ll --- a/llvm/test/CodeGen/RISCV/rv32zbb-zbp.ll +++ b/llvm/test/CodeGen/RISCV/rv32zbb-zbp.ll @@ -557,3 +557,115 @@ %1 = ashr i16 %a, 9 ret i16 %1 } + +define i1 @andn_seqz_i32(i32 %a, i32 %b) nounwind { +; RV32I-LABEL: andn_seqz_i32: +; RV32I: # %bb.0: +; RV32I-NEXT: and a0, a0, a1 +; RV32I-NEXT: xor a0, a0, a1 +; RV32I-NEXT: seqz a0, a0 +; RV32I-NEXT: ret +; +; RV32ZBB-LABEL: andn_seqz_i32: +; RV32ZBB: # %bb.0: +; RV32ZBB-NEXT: andn a0, a1, a0 +; RV32ZBB-NEXT: seqz a0, a0 +; RV32ZBB-NEXT: ret +; +; RV32ZBP-LABEL: andn_seqz_i32: +; RV32ZBP: # %bb.0: +; RV32ZBP-NEXT: and a0, a0, a1 +; RV32ZBP-NEXT: xor a0, a0, a1 +; RV32ZBP-NEXT: seqz a0, a0 +; RV32ZBP-NEXT: ret + %and = and i32 %a, %b + %cmpeq = icmp eq i32 %and, %b + ret i1 %cmpeq +} + +define i1 @andn_seqz_i64(i64 %a, i64 %b) nounwind { +; RV32I-LABEL: andn_seqz_i64: +; RV32I: # %bb.0: +; RV32I-NEXT: not a0, a0 +; RV32I-NEXT: not a1, a1 +; RV32I-NEXT: and a1, a1, a3 +; RV32I-NEXT: and a0, a0, a2 +; RV32I-NEXT: or a0, a0, a1 +; RV32I-NEXT: seqz a0, a0 +; RV32I-NEXT: ret +; +; RV32ZBB-LABEL: andn_seqz_i64: +; RV32ZBB: # %bb.0: +; RV32ZBB-NEXT: andn a1, a3, a1 +; RV32ZBB-NEXT: andn a0, a2, a0 +; RV32ZBB-NEXT: or a0, a0, a1 +; RV32ZBB-NEXT: seqz a0, a0 +; RV32ZBB-NEXT: ret +; +; RV32ZBP-LABEL: andn_seqz_i64: +; RV32ZBP: # %bb.0: +; RV32ZBP-NEXT: andn a1, a3, a1 +; RV32ZBP-NEXT: andn a0, a2, a0 +; RV32ZBP-NEXT: or a0, a0, a1 +; RV32ZBP-NEXT: seqz a0, a0 +; RV32ZBP-NEXT: ret + %and = and i64 %a, %b + %cmpeq = icmp eq i64 %and, %b + ret i1 %cmpeq +} + +define i1 @andn_ne_i32(i32 %a, i32 %b) nounwind { +; RV32I-LABEL: andn_ne_i32: +; RV32I: # %bb.0: +; RV32I-NEXT: and a0, a0, a1 +; RV32I-NEXT: xor a0, a0, a1 +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: ret +; +; RV32ZBB-LABEL: andn_ne_i32: +; RV32ZBB: # %bb.0: +; RV32ZBB-NEXT: andn a0, a1, a0 +; RV32ZBB-NEXT: snez a0, a0 +; RV32ZBB-NEXT: ret +; +; RV32ZBP-LABEL: andn_ne_i32: +; 
RV32ZBP: # %bb.0: +; RV32ZBP-NEXT: and a0, a0, a1 +; RV32ZBP-NEXT: xor a0, a0, a1 +; RV32ZBP-NEXT: snez a0, a0 +; RV32ZBP-NEXT: ret + %and = and i32 %a, %b + %cmpeq = icmp ne i32 %and, %b + ret i1 %cmpeq +} + +define i1 @andn_ne_i64(i64 %a, i64 %b) nounwind { +; RV32I-LABEL: andn_ne_i64: +; RV32I: # %bb.0: +; RV32I-NEXT: not a0, a0 +; RV32I-NEXT: not a1, a1 +; RV32I-NEXT: and a1, a1, a3 +; RV32I-NEXT: and a0, a0, a2 +; RV32I-NEXT: or a0, a0, a1 +; RV32I-NEXT: snez a0, a0 +; RV32I-NEXT: ret +; +; RV32ZBB-LABEL: andn_ne_i64: +; RV32ZBB: # %bb.0: +; RV32ZBB-NEXT: andn a1, a3, a1 +; RV32ZBB-NEXT: andn a0, a2, a0 +; RV32ZBB-NEXT: or a0, a0, a1 +; RV32ZBB-NEXT: snez a0, a0 +; RV32ZBB-NEXT: ret +; +; RV32ZBP-LABEL: andn_ne_i64: +; RV32ZBP: # %bb.0: +; RV32ZBP-NEXT: andn a1, a3, a1 +; RV32ZBP-NEXT: andn a0, a2, a0 +; RV32ZBP-NEXT: or a0, a0, a1 +; RV32ZBP-NEXT: snez a0, a0 +; RV32ZBP-NEXT: ret + %and = and i64 %a, %b + %cmpeq = icmp ne i64 %and, %b + ret i1 %cmpeq +} diff --git a/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll b/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll --- a/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll +++ b/llvm/test/CodeGen/RISCV/rv64zbb-zbp.ll @@ -628,3 +628,113 @@ %1 = ashr i16 %a, 9 ret i16 %1 } + +define i1 @andn_seqz_i32(i32 %a, i32 %b) nounwind { +; RV64I-LABEL: andn_seqz_i32: +; RV64I: # %bb.0: +; RV64I-NEXT: sext.w a2, a1 +; RV64I-NEXT: and a0, a0, a1 +; RV64I-NEXT: sext.w a0, a0 +; RV64I-NEXT: xor a0, a0, a2 +; RV64I-NEXT: seqz a0, a0 +; RV64I-NEXT: ret +; +; RV64ZBB-LABEL: andn_seqz_i32: +; RV64ZBB: # %bb.0: +; RV64ZBB-NEXT: andn a0, a1, a0 +; RV64ZBB-NEXT: sext.w a0, a0 +; RV64ZBB-NEXT: seqz a0, a0 +; RV64ZBB-NEXT: ret +; +; RV64ZBP-LABEL: andn_seqz_i32: +; RV64ZBP: # %bb.0: +; RV64ZBP-NEXT: sext.w a2, a1 +; RV64ZBP-NEXT: and a0, a0, a1 +; RV64ZBP-NEXT: sext.w a0, a0 +; RV64ZBP-NEXT: xor a0, a0, a2 +; RV64ZBP-NEXT: seqz a0, a0 +; RV64ZBP-NEXT: ret + %and = and i32 %a, %b + %cmpeq = icmp eq i32 %and, %b + ret i1 %cmpeq +} + +define i1 @andn_seqz_i64(i64 %a, i64 
%b) nounwind { +; RV64I-LABEL: andn_seqz_i64: +; RV64I: # %bb.0: +; RV64I-NEXT: and a0, a0, a1 +; RV64I-NEXT: xor a0, a0, a1 +; RV64I-NEXT: seqz a0, a0 +; RV64I-NEXT: ret +; +; RV64ZBB-LABEL: andn_seqz_i64: +; RV64ZBB: # %bb.0: +; RV64ZBB-NEXT: andn a0, a1, a0 +; RV64ZBB-NEXT: seqz a0, a0 +; RV64ZBB-NEXT: ret +; +; RV64ZBP-LABEL: andn_seqz_i64: +; RV64ZBP: # %bb.0: +; RV64ZBP-NEXT: and a0, a0, a1 +; RV64ZBP-NEXT: xor a0, a0, a1 +; RV64ZBP-NEXT: seqz a0, a0 +; RV64ZBP-NEXT: ret + %and = and i64 %a, %b + %cmpeq = icmp eq i64 %and, %b + ret i1 %cmpeq +} + +define i1 @andn_ne_i32(i32 %a, i32 %b) nounwind { +; RV64I-LABEL: andn_ne_i32: +; RV64I: # %bb.0: +; RV64I-NEXT: sext.w a2, a1 +; RV64I-NEXT: and a0, a0, a1 +; RV64I-NEXT: sext.w a0, a0 +; RV64I-NEXT: xor a0, a0, a2 +; RV64I-NEXT: snez a0, a0 +; RV64I-NEXT: ret +; +; RV64ZBB-LABEL: andn_ne_i32: +; RV64ZBB: # %bb.0: +; RV64ZBB-NEXT: andn a0, a1, a0 +; RV64ZBB-NEXT: sext.w a0, a0 +; RV64ZBB-NEXT: snez a0, a0 +; RV64ZBB-NEXT: ret +; +; RV64ZBP-LABEL: andn_ne_i32: +; RV64ZBP: # %bb.0: +; RV64ZBP-NEXT: sext.w a2, a1 +; RV64ZBP-NEXT: and a0, a0, a1 +; RV64ZBP-NEXT: sext.w a0, a0 +; RV64ZBP-NEXT: xor a0, a0, a2 +; RV64ZBP-NEXT: snez a0, a0 +; RV64ZBP-NEXT: ret + %and = and i32 %a, %b + %cmpeq = icmp ne i32 %and, %b + ret i1 %cmpeq +} + +define i1 @andn_ne_i64(i64 %a, i64 %b) nounwind { +; RV64I-LABEL: andn_ne_i64: +; RV64I: # %bb.0: +; RV64I-NEXT: and a0, a0, a1 +; RV64I-NEXT: xor a0, a0, a1 +; RV64I-NEXT: snez a0, a0 +; RV64I-NEXT: ret +; +; RV64ZBB-LABEL: andn_ne_i64: +; RV64ZBB: # %bb.0: +; RV64ZBB-NEXT: andn a0, a1, a0 +; RV64ZBB-NEXT: snez a0, a0 +; RV64ZBB-NEXT: ret +; +; RV64ZBP-LABEL: andn_ne_i64: +; RV64ZBP: # %bb.0: +; RV64ZBP-NEXT: and a0, a0, a1 +; RV64ZBP-NEXT: xor a0, a0, a1 +; RV64ZBP-NEXT: snez a0, a0 +; RV64ZBP-NEXT: ret + %and = and i64 %a, %b + %cmpeq = icmp ne i64 %and, %b + ret i1 %cmpeq +}