diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
--- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -123,8 +123,13 @@
 
   MF->insert(++MBB.getIterator(), NewMBB);
 
-  BuildMI(NewMBB, DL, TII->get(RISCV::AUIPC), DestReg)
-      .addDisp(Symbol, 0, FlagsHi);
+  if (Symbol.isJTI()) {
+    BuildMI(NewMBB, DL, TII->get(RISCV::AUIPC), DestReg)
+        .addJumpTableIndex(Symbol.getIndex(), FlagsHi);
+  } else {
+    BuildMI(NewMBB, DL, TII->get(RISCV::AUIPC), DestReg)
+        .addDisp(Symbol, 0, FlagsHi);
+  }
   BuildMI(NewMBB, DL, TII->get(SecondOpcode), DestReg)
       .addReg(DestReg)
       .addMBB(NewMBB, RISCVII::MO_PCREL_LO);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -236,6 +236,7 @@
   SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -235,6 +235,7 @@
   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
+  setOperationAction(ISD::JumpTable, XLenVT, Custom);
 
   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
 
@@ -261,9 +262,6 @@
   setMinFunctionAlignment(FunctionAlignment);
   setPrefFunctionAlignment(FunctionAlignment);
 
-  // Effectively disable jump table generation.
-  setMinimumJumpTableEntries(INT_MAX);
-
   // Jumps are expensive, compared to logic
   setJumpIsExpensive();
 
@@ -456,6 +454,8 @@
     return lowerBlockAddress(Op, DAG);
   case ISD::ConstantPool:
     return lowerConstantPool(Op, DAG);
+  case ISD::JumpTable:
+    return lowerJumpTable(Op, DAG);
   case ISD::GlobalTLSAddress:
     return lowerGlobalTLSAddress(Op, DAG);
   case ISD::SELECT:
@@ -505,6 +505,11 @@
                                    N->getOffset(), Flags);
 }
 
+static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
+                             SelectionDAG &DAG, unsigned Flags) {
+  return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
+}
+
 template <class NodeTy>
 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                      bool IsLocal) const {
@@ -582,6 +587,13 @@
   return getAddr(N, DAG);
 }
 
+SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
+                                            SelectionDAG &DAG) const {
+  JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
+
+  return getAddr(N, DAG);
+}
+
 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
                                               SelectionDAG &DAG,
                                               bool UseGOT) const {
diff --git a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
--- a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -121,6 +121,9 @@
   case MachineOperand::MO_ConstantPoolIndex:
     MCOp = lowerSymbolOperand(MO, AP.GetCPISymbol(MO.getIndex()), AP);
     break;
+  case MachineOperand::MO_JumpTableIndex:
+    MCOp = lowerSymbolOperand(MO, AP.GetJTISymbol(MO.getIndex()), AP);
+    break;
   }
   return true;
 }
diff --git a/llvm/test/CodeGen/RISCV/jumptable.ll b/llvm/test/CodeGen/RISCV/jumptable.ll
--- a/llvm/test/CodeGen/RISCV/jumptable.ll
+++ b/llvm/test/CodeGen/RISCV/jumptable.ll
@@ -5,35 +5,40 @@
 define void @jt(i32 %in, i32* %out) nounwind {
 ; RV32I-LABEL: jt:
 ; RV32I:       # %bb.0: # %entry
-; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    blt a2, a0, .LBB0_4
-; RV32I-NEXT:  # %bb.1: # %entry
-; RV32I-NEXT:    addi a2, zero, 1
-; RV32I-NEXT:    beq a0, a2, .LBB0_7
-; RV32I-NEXT:  # %bb.2: # %entry
-; RV32I-NEXT:    addi a2, zero, 2
-; RV32I-NEXT:    bne a0, a2, .LBB0_10
-; RV32I-NEXT:  # %bb.3: # %bb2
-; RV32I-NEXT:    addi a0, zero, 3
-; RV32I-NEXT:    j .LBB0_9
-; RV32I-NEXT:  .LBB0_4: # %entry
-; RV32I-NEXT:    addi a2, zero, 3
-; RV32I-NEXT:    beq a0, a2, .LBB0_8
-; RV32I-NEXT:  # %bb.5: # %entry
-; RV32I-NEXT:    addi a2, zero, 4
-; RV32I-NEXT:    bne a0, a2, .LBB0_10
-; RV32I-NEXT:  # %bb.6: # %bb4
-; RV32I-NEXT:    addi a0, zero, 1
-; RV32I-NEXT:    j .LBB0_9
-; RV32I-NEXT:  .LBB0_7: # %bb1
-; RV32I-NEXT:    addi a0, zero, 4
-; RV32I-NEXT:    j .LBB0_9
-; RV32I-NEXT:  .LBB0_8: # %bb3
-; RV32I-NEXT:    addi a0, zero, 2
-; RV32I-NEXT:  .LBB0_9: # %exit
-; RV32I-NEXT:    sw a0, 0(a1)
-; RV32I-NEXT:  .LBB0_10: # %exit
-; RV32I-NEXT:    ret
+; RV32I-NEXT:    addi a0, a0, -1
+; RV32I-NEXT:    addi a2, zero, 3
+; RV32I-NEXT:    bltu a2, a0, .LBB0_7
+; RV32I-NEXT:  # %bb.1: # %entry
+; RV32I-NEXT:    slli a0, a0, 2
+; RV32I-NEXT:    lui a2, %hi(.LJTI0_0)
+; RV32I-NEXT:    addi a2, a2, %lo(.LJTI0_0)
+; RV32I-NEXT:    add a0, a0, a2
+; RV32I-NEXT:    lw a0, 0(a0)
+; RV32I-NEXT:    jr a0
+; RV32I-NEXT:  .LBB0_2: # %bb1
+; RV32I-NEXT:    addi a0, zero, 4
+; RV32I-NEXT:    j .LBB0_6
+; RV32I-NEXT:  .LBB0_3: # %bb2
+; RV32I-NEXT:    addi a0, zero, 3
+; RV32I-NEXT:    j .LBB0_6
+; RV32I-NEXT:  .LBB0_4: # %bb3
+; RV32I-NEXT:    addi a0, zero, 2
+; RV32I-NEXT:    j .LBB0_6
+; RV32I-NEXT:  .LBB0_5: # %bb4
+; RV32I-NEXT:    addi a0, zero, 1
+; RV32I-NEXT:  .LBB0_6: # %exit
+; RV32I-NEXT:    sw a0, 0(a1)
+; RV32I-NEXT:  .LBB0_7: # %exit
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .Lfunc_end0:
+; RV32I-NEXT:    .size jt, .Lfunc_end0-jt
+; RV32I-NEXT:    .section .rodata,"a",@progbits
+; RV32I-NEXT:    .p2align 2
+; RV32I-NEXT:  .LJTI0_0:
+; RV32I-NEXT:    .word .LBB0_2
+; RV32I-NEXT:    .word .LBB0_3
+; RV32I-NEXT:    .word .LBB0_4
+; RV32I-NEXT:    .word .LBB0_5
 entry:
   switch i32 %in, label %exit [
     i32 1, label %bb1