diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
--- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -123,8 +123,13 @@
 
   MF->insert(++MBB.getIterator(), NewMBB);
 
-  BuildMI(NewMBB, DL, TII->get(RISCV::AUIPC), DestReg)
-      .addDisp(Symbol, 0, FlagsHi);
+  if (Symbol.isJTI()) {
+    BuildMI(NewMBB, DL, TII->get(RISCV::AUIPC), DestReg)
+        .addJumpTableIndex(Symbol.getIndex(), FlagsHi);
+  } else {
+    BuildMI(NewMBB, DL, TII->get(RISCV::AUIPC), DestReg)
+        .addDisp(Symbol, 0, FlagsHi);
+  }
   BuildMI(NewMBB, DL, TII->get(SecondOpcode), DestReg)
       .addReg(DestReg)
       .addMBB(NewMBB, RISCVII::MO_PCREL_LO);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -236,6 +236,7 @@
   SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -235,6 +235,7 @@
   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
+  setOperationAction(ISD::JumpTable, XLenVT, Custom);
 
   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
 
@@ -261,9 +262,6 @@
   setMinFunctionAlignment(FunctionAlignment);
   setPrefFunctionAlignment(FunctionAlignment);
 
-  // Effectively disable jump table generation.
-  setMinimumJumpTableEntries(INT_MAX);
-
   // Jumps are expensive, compared to logic
   setJumpIsExpensive();
 
@@ -456,6 +454,8 @@
     return lowerBlockAddress(Op, DAG);
   case ISD::ConstantPool:
     return lowerConstantPool(Op, DAG);
+  case ISD::JumpTable:
+    return lowerJumpTable(Op, DAG);
   case ISD::GlobalTLSAddress:
     return lowerGlobalTLSAddress(Op, DAG);
   case ISD::SELECT:
@@ -505,6 +505,11 @@
                                    N->getOffset(), Flags);
 }
 
+static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
+                             SelectionDAG &DAG, unsigned Flags) {
+  return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
+}
+
 template <class NodeTy>
 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                      bool IsLocal) const {
@@ -582,6 +587,13 @@
   return getAddr(N, DAG);
 }
 
+SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
+                                            SelectionDAG &DAG) const {
+  JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
+
+  return getAddr(N, DAG);
+}
+
 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
                                               SelectionDAG &DAG,
                                               bool UseGOT) const {
diff --git a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
--- a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -121,6 +121,9 @@
   case MachineOperand::MO_ConstantPoolIndex:
     MCOp = lowerSymbolOperand(MO, AP.GetCPISymbol(MO.getIndex()), AP);
     break;
+  case MachineOperand::MO_JumpTableIndex:
+    MCOp = lowerSymbolOperand(MO, AP.GetJTISymbol(MO.getIndex()), AP);
+    break;
   }
   return true;
 }
diff --git a/llvm/test/CodeGen/RISCV/jumptable.ll b/llvm/test/CodeGen/RISCV/jumptable.ll
--- a/llvm/test/CodeGen/RISCV/jumptable.ll
+++ b/llvm/test/CodeGen/RISCV/jumptable.ll
@@ -1,39 +1,133 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
-; RUN:   | FileCheck %s -check-prefix=RV32I
+; RUN: llc -mtriple=riscv32 -code-model=small -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I-SMALL
+; RUN: llc -mtriple=riscv32 -code-model=medium -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV32I-MEDIUM
+; RUN: llc -mtriple=riscv64 -code-model=small -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I-SMALL
+; RUN: llc -mtriple=riscv64 -code-model=medium -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s -check-prefix=RV64I-MEDIUM
 
 define void @jt(i32 %in, i32* %out) nounwind {
-; RV32I-LABEL: jt:
-; RV32I: # %bb.0: # %entry
-; RV32I-NEXT: addi a2, zero, 2
-; RV32I-NEXT: blt a2, a0, .LBB0_4
-; RV32I-NEXT: # %bb.1: # %entry
-; RV32I-NEXT: addi a2, zero, 1
-; RV32I-NEXT: beq a0, a2, .LBB0_7
-; RV32I-NEXT: # %bb.2: # %entry
-; RV32I-NEXT: addi a2, zero, 2
-; RV32I-NEXT: bne a0, a2, .LBB0_10
-; RV32I-NEXT: # %bb.3: # %bb2
-; RV32I-NEXT: addi a0, zero, 3
-; RV32I-NEXT: j .LBB0_9
-; RV32I-NEXT: .LBB0_4: # %entry
-; RV32I-NEXT: addi a2, zero, 3
-; RV32I-NEXT: beq a0, a2, .LBB0_8
-; RV32I-NEXT: # %bb.5: # %entry
-; RV32I-NEXT: addi a2, zero, 4
-; RV32I-NEXT: bne a0, a2, .LBB0_10
-; RV32I-NEXT: # %bb.6: # %bb4
-; RV32I-NEXT: addi a0, zero, 1
-; RV32I-NEXT: j .LBB0_9
-; RV32I-NEXT: .LBB0_7: # %bb1
-; RV32I-NEXT: addi a0, zero, 4
-; RV32I-NEXT: j .LBB0_9
-; RV32I-NEXT: .LBB0_8: # %bb3
-; RV32I-NEXT: addi a0, zero, 2
-; RV32I-NEXT: .LBB0_9: # %exit
-; RV32I-NEXT: sw a0, 0(a1)
-; RV32I-NEXT: .LBB0_10: # %exit
-; RV32I-NEXT: ret
+; RV32I-SMALL-LABEL: jt:
+; RV32I-SMALL: # %bb.0: # %entry
+; RV32I-SMALL-NEXT: addi a0, a0, -1
+; RV32I-SMALL-NEXT: addi a2, zero, 3
+; RV32I-SMALL-NEXT: bltu a2, a0, .LBB0_7
+; RV32I-SMALL-NEXT: # %bb.1: # %entry
+; RV32I-SMALL-NEXT: slli a0, a0, 2
+; RV32I-SMALL-NEXT: lui a2, %hi(.LJTI0_0)
+; RV32I-SMALL-NEXT: addi a2, a2, %lo(.LJTI0_0)
+; RV32I-SMALL-NEXT: add a0, a0, a2
+; RV32I-SMALL-NEXT: lw a0, 0(a0)
+; RV32I-SMALL-NEXT: jr a0
+; RV32I-SMALL-NEXT: .LBB0_2: # %bb1
+; RV32I-SMALL-NEXT: addi a0, zero, 4
+; RV32I-SMALL-NEXT: j .LBB0_6
+; RV32I-SMALL-NEXT: .LBB0_3: # %bb2
+; RV32I-SMALL-NEXT: addi a0, zero, 3
+; RV32I-SMALL-NEXT: j .LBB0_6
+; RV32I-SMALL-NEXT: .LBB0_4: # %bb3
+; RV32I-SMALL-NEXT: addi a0, zero, 2
+; RV32I-SMALL-NEXT: j .LBB0_6
+; RV32I-SMALL-NEXT: .LBB0_5: # %bb4
+; RV32I-SMALL-NEXT: addi a0, zero, 1
+; RV32I-SMALL-NEXT: .LBB0_6: # %exit
+; RV32I-SMALL-NEXT: sw a0, 0(a1)
+; RV32I-SMALL-NEXT: .LBB0_7: # %exit
+; RV32I-SMALL-NEXT: ret
+;
+; RV32I-MEDIUM-LABEL: jt:
+; RV32I-MEDIUM: # %bb.0: # %entry
+; RV32I-MEDIUM-NEXT: addi a0, a0, -1
+; RV32I-MEDIUM-NEXT: addi a2, zero, 3
+; RV32I-MEDIUM-NEXT: bltu a2, a0, .LBB0_7
+; RV32I-MEDIUM-NEXT: # %bb.1: # %entry
+; RV32I-MEDIUM-NEXT: slli a0, a0, 2
+; RV32I-MEDIUM-NEXT: .LBB0_8: # %entry
+; RV32I-MEDIUM-NEXT: # Label of block must be emitted
+; RV32I-MEDIUM-NEXT: auipc a2, %pcrel_hi(.LJTI0_0)
+; RV32I-MEDIUM-NEXT: addi a2, a2, %pcrel_lo(.LBB0_8)
+; RV32I-MEDIUM-NEXT: add a0, a0, a2
+; RV32I-MEDIUM-NEXT: lw a0, 0(a0)
+; RV32I-MEDIUM-NEXT: jr a0
+; RV32I-MEDIUM-NEXT: .LBB0_2: # %bb1
+; RV32I-MEDIUM-NEXT: addi a0, zero, 4
+; RV32I-MEDIUM-NEXT: j .LBB0_6
+; RV32I-MEDIUM-NEXT: .LBB0_3: # %bb2
+; RV32I-MEDIUM-NEXT: addi a0, zero, 3
+; RV32I-MEDIUM-NEXT: j .LBB0_6
+; RV32I-MEDIUM-NEXT: .LBB0_4: # %bb3
+; RV32I-MEDIUM-NEXT: addi a0, zero, 2
+; RV32I-MEDIUM-NEXT: j .LBB0_6
+; RV32I-MEDIUM-NEXT: .LBB0_5: # %bb4
+; RV32I-MEDIUM-NEXT: addi a0, zero, 1
+; RV32I-MEDIUM-NEXT: .LBB0_6: # %exit
+; RV32I-MEDIUM-NEXT: sw a0, 0(a1)
+; RV32I-MEDIUM-NEXT: .LBB0_7: # %exit
+; RV32I-MEDIUM-NEXT: ret
+;
+; RV64I-SMALL-LABEL: jt:
+; RV64I-SMALL: # %bb.0: # %entry
+; RV64I-SMALL-NEXT: slli a0, a0, 32
+; RV64I-SMALL-NEXT: srli a0, a0, 32
+; RV64I-SMALL-NEXT: addi a0, a0, -1
+; RV64I-SMALL-NEXT: addi a2, zero, 3
+; RV64I-SMALL-NEXT: bltu a2, a0, .LBB0_7
+; RV64I-SMALL-NEXT: # %bb.1: # %entry
+; RV64I-SMALL-NEXT: slli a0, a0, 3
+; RV64I-SMALL-NEXT: lui a2, %hi(.LJTI0_0)
+; RV64I-SMALL-NEXT: addi a2, a2, %lo(.LJTI0_0)
+; RV64I-SMALL-NEXT: add a0, a0, a2
+; RV64I-SMALL-NEXT: ld a0, 0(a0)
+; RV64I-SMALL-NEXT: jr a0
+; RV64I-SMALL-NEXT: .LBB0_2: # %bb1
+; RV64I-SMALL-NEXT: addi a0, zero, 4
+; RV64I-SMALL-NEXT: j .LBB0_6
+; RV64I-SMALL-NEXT: .LBB0_3: # %bb2
+; RV64I-SMALL-NEXT: addi a0, zero, 3
+; RV64I-SMALL-NEXT: j .LBB0_6
+; RV64I-SMALL-NEXT: .LBB0_4: # %bb3
+; RV64I-SMALL-NEXT: addi a0, zero, 2
+; RV64I-SMALL-NEXT: j .LBB0_6
+; RV64I-SMALL-NEXT: .LBB0_5: # %bb4
+; RV64I-SMALL-NEXT: addi a0, zero, 1
+; RV64I-SMALL-NEXT: .LBB0_6: # %exit
+; RV64I-SMALL-NEXT: sw a0, 0(a1)
+; RV64I-SMALL-NEXT: .LBB0_7: # %exit
+; RV64I-SMALL-NEXT: ret
+;
+; RV64I-MEDIUM-LABEL: jt:
+; RV64I-MEDIUM: # %bb.0: # %entry
+; RV64I-MEDIUM-NEXT: slli a0, a0, 32
+; RV64I-MEDIUM-NEXT: srli a0, a0, 32
+; RV64I-MEDIUM-NEXT: addi a0, a0, -1
+; RV64I-MEDIUM-NEXT: addi a2, zero, 3
+; RV64I-MEDIUM-NEXT: bltu a2, a0, .LBB0_7
+; RV64I-MEDIUM-NEXT: # %bb.1: # %entry
+; RV64I-MEDIUM-NEXT: slli a0, a0, 3
+; RV64I-MEDIUM-NEXT: .LBB0_8: # %entry
+; RV64I-MEDIUM-NEXT: # Label of block must be emitted
+; RV64I-MEDIUM-NEXT: auipc a2, %pcrel_hi(.LJTI0_0)
+; RV64I-MEDIUM-NEXT: addi a2, a2, %pcrel_lo(.LBB0_8)
+; RV64I-MEDIUM-NEXT: add a0, a0, a2
+; RV64I-MEDIUM-NEXT: ld a0, 0(a0)
+; RV64I-MEDIUM-NEXT: jr a0
+; RV64I-MEDIUM-NEXT: .LBB0_2: # %bb1
+; RV64I-MEDIUM-NEXT: addi a0, zero, 4
+; RV64I-MEDIUM-NEXT: j .LBB0_6
+; RV64I-MEDIUM-NEXT: .LBB0_3: # %bb2
+; RV64I-MEDIUM-NEXT: addi a0, zero, 3
+; RV64I-MEDIUM-NEXT: j .LBB0_6
+; RV64I-MEDIUM-NEXT: .LBB0_4: # %bb3
+; RV64I-MEDIUM-NEXT: addi a0, zero, 2
+; RV64I-MEDIUM-NEXT: j .LBB0_6
+; RV64I-MEDIUM-NEXT: .LBB0_5: # %bb4
+; RV64I-MEDIUM-NEXT: addi a0, zero, 1
+; RV64I-MEDIUM-NEXT: .LBB0_6: # %exit
+; RV64I-MEDIUM-NEXT: sw a0, 0(a1)
+; RV64I-MEDIUM-NEXT: .LBB0_7: # %exit
+; RV64I-MEDIUM-NEXT: ret
 entry:
   switch i32 %in, label %exit [
     i32 1, label %bb1