diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -620,6 +620,14 @@
   /// NOTE: Once BUILD_VECTOR can be custom lowered for all legal vector types,
   /// this override can be removed.
   bool mergeStoresAfterLegalization(EVT VT) const override;
+
+  /// Disable normalizing
+  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
+  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)).
+  /// RISCV doesn't have flags, so it's better to perform the and/or in a GPR.
+  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override {
+    return false;
+  }
 };
 
 namespace RISCV {
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -963,9 +963,6 @@
   // Jumps are expensive, compared to logic
   setJumpIsExpensive();
 
-  // We can use any register for comparisons
-  setHasMultipleConditionRegisters();
-
   setTargetDAGCombine(ISD::ADD);
   setTargetDAGCombine(ISD::SUB);
   setTargetDAGCombine(ISD::AND);
diff --git a/llvm/test/CodeGen/RISCV/sink-icmp.ll b/llvm/test/CodeGen/RISCV/sink-icmp.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/sink-icmp.ll
@@ -0,0 +1,73 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbt -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32IBT %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbt -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IBT %s
+
+define dso_local signext i16 @func(i16* %a, i16* %b) {
+; RV32I-LABEL: func:
+; RV32I:       # %bb.0: # %entry
+; RV32I-NEXT:    lh a0, 0(a0)
+; RV32I-NEXT:    bltz a0, .LBB0_3
+; RV32I-NEXT:  # %bb.1: # %.LBB0_1
+; RV32I-NEXT:    beqz a1, .LBB0_3
+; RV32I-NEXT:  # %bb.2: # %.LBB0_2
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB0_3: # %return
+; RV32I-NEXT:    mv a0, zero
+; RV32I-NEXT:    ret
+;
+; RV32IBT-LABEL: func:
+; RV32IBT:       # %bb.0: # %entry
+; RV32IBT-NEXT:    lh a0, 0(a0)
+; RV32IBT-NEXT:    bltz a0, .LBB0_3
+; RV32IBT-NEXT:  # %bb.1: # %.LBB0_1
+; RV32IBT-NEXT:    beqz a1, .LBB0_3
+; RV32IBT-NEXT:  # %bb.2: # %.LBB0_2
+; RV32IBT-NEXT:    ret
+; RV32IBT-NEXT:  .LBB0_3: # %return
+; RV32IBT-NEXT:    mv a0, zero
+; RV32IBT-NEXT:    ret
+;
+; RV64I-LABEL: func:
+; RV64I:       # %bb.0: # %entry
+; RV64I-NEXT:    lh a0, 0(a0)
+; RV64I-NEXT:    bltz a0, .LBB0_3
+; RV64I-NEXT:  # %bb.1: # %.LBB0_1
+; RV64I-NEXT:    beqz a1, .LBB0_3
+; RV64I-NEXT:  # %bb.2: # %.LBB0_2
+; RV64I-NEXT:    ret
+; RV64I-NEXT:  .LBB0_3: # %return
+; RV64I-NEXT:    mv a0, zero
+; RV64I-NEXT:    ret
+;
+; RV64IBT-LABEL: func:
+; RV64IBT:       # %bb.0: # %entry
+; RV64IBT-NEXT:    lh a0, 0(a0)
+; RV64IBT-NEXT:    bltz a0, .LBB0_3
+; RV64IBT-NEXT:  # %bb.1: # %.LBB0_1
+; RV64IBT-NEXT:    beqz a1, .LBB0_3
+; RV64IBT-NEXT:  # %bb.2: # %.LBB0_2
+; RV64IBT-NEXT:    ret
+; RV64IBT-NEXT:  .LBB0_3: # %return
+; RV64IBT-NEXT:    mv a0, zero
+; RV64IBT-NEXT:    ret
+entry:
+  %0 = load i16, i16* %a
+  %cmp = icmp sgt i16 %0, -1
+  %tobool.not = icmp eq i16* %b, null
+  br i1 %cmp, label %.LBB0_1, label %return
+
+.LBB0_1:
+  br i1 %tobool.not, label %return, label %.LBB0_2
+
+.LBB0_2:
+  ret i16 %0
+
+return:
+  ret i16 0
+}
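
For context (not part of the patch): a minimal IR sketch of the pattern the new shouldNormalizeToSelectSequence override affects. The function and value names are illustrative only. When the hook returns true, DAGCombiner splits a select whose condition is an and/or of two conditions into a chain of selects; with the override returning false, the single select is kept and the and/or is computed in a GPR.

; Illustrative only -- not part of the patch or the test above.
define i32 @select_on_and(i32 %a, i32 %b, i32 %x, i32 %y) {
entry:
  %c0 = icmp slt i32 %a, 0
  %c1 = icmp ne i32 %b, 0
  %cond = and i1 %c0, %c1
  ; With shouldNormalizeToSelectSequence() returning true, the DAG combiner
  ; would rewrite this as select(%c0, select(%c1, %x, %y), %y); with the
  ; override returning false, this stays a single select fed by the and.
  %r = select i1 %cond, i32 %x, i32 %y
  ret i32 %r
}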