diff --git a/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.h b/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.h
--- a/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.h
+++ b/llvm/lib/Target/LoongArch/LoongArchAsmPrinter.h
@@ -39,6 +39,10 @@
   // tblgen'erated function.
   bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
                                    const MachineInstr *MI);
+  // Wrapper needed for tblgenned pseudo lowering.
+  bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
+    return lowerLoongArchMachineOperandToMCOperand(MO, MCOp, *this);
+  }
 };
 
 } // end namespace llvm
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -66,6 +66,7 @@
     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
   }
 
+  setOperationAction(ISD::BR_CC, GRLenVT, Expand);
   setOperationAction(ISD::SELECT_CC, GRLenVT, Expand);
   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
 
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -106,7 +106,14 @@
   let DecoderMethod = "decodeSImmOperand<16>";
 }
 
-def simm16_lsl2 : Operand<GRLenVT> {
+def simm16_lsl2 : Operand<GRLenVT>,
+                  ImmLeaf<GRLenVT, [{return isInt<16>(Imm>>2);}]> {
+  let ParserMatchClass = SImmAsmOperand<16, "lsl2">;
+  let EncoderMethod = "getImmOpValueAsr2";
+  let DecoderMethod = "decodeSImmOperand<16, 2>";
+}
+
+def simm16_lsl2_br : Operand<OtherVT> {
   let ParserMatchClass = SImmAsmOperand<16, "lsl2">;
   let EncoderMethod = "getImmOpValueAsr2";
   let DecoderMethod = "decodeSImmOperand<16, 2>";
@@ -117,13 +124,13 @@
   let DecoderMethod = "decodeSImmOperand<20>";
 }
 
-def simm21_lsl2 : Operand<GRLenVT> {
+def simm21_lsl2 : Operand<OtherVT> {
   let ParserMatchClass = SImmAsmOperand<21, "lsl2">;
   let EncoderMethod = "getImmOpValueAsr2";
   let DecoderMethod = "decodeSImmOperand<21, 2>";
 }
 
-def simm26_lsl2 : Operand<GRLenVT> {
+def simm26_lsl2 : Operand<OtherVT> {
   let ParserMatchClass = SImmAsmOperand<26, "lsl2">;
   let EncoderMethod = "getImmOpValueAsr2";
   let DecoderMethod = "decodeSImmOperand<26, 2>";
@@ -185,7 +192,7 @@
     : Fmt2R<op, (outs GPR:$rd), (ins GPR:$rj), opstr, "$rd, $rj">;
 
 class BrCC_2RI16<bits<6> op, string opstr>
-    : Fmt2RI16<op, (outs), (ins GPR:$rj, GPR:$rd, simm16_lsl2:$imm16), opstr,
+    : Fmt2RI16<op, (outs), (ins GPR:$rj, GPR:$rd, simm16_lsl2_br:$imm16), opstr,
                "$rj, $rd, $imm16"> {
   let isBranch = 1;
   let isTerminator = 1;
@@ -649,6 +656,44 @@
 
 /// Branches and jumps
 
+class BccPat<PatFrag CondOp, LAInst Inst>
+    : Pat<(brcond (GRLenVT (CondOp GPR:$rj, GPR:$rd)), bb:$imm16),
+          (Inst GPR:$rj, GPR:$rd, bb:$imm16)>;
+
+def : BccPat<seteq, BEQ>;
+def : BccPat<setne, BNE>;
+def : BccPat<setlt, BLT>;
+def : BccPat<setge, BGE>;
+def : BccPat<setult, BLTU>;
+def : BccPat<setuge, BGEU>;
+
+class BccSwapPat<PatFrag CondOp, LAInst InstBcc>
+    : Pat<(brcond (GRLenVT (CondOp GPR:$rd, GPR:$rj)), bb:$imm16),
+          (InstBcc GPR:$rj, GPR:$rd, bb:$imm16)>;
+
+// Condition codes that don't have matching LoongArch branch instructions, but
+// are trivially supported by swapping the two input operands.
+def : BccSwapPat<setgt, BLT>;
+def : BccSwapPat<setle, BGE>;
+def : BccSwapPat<setugt, BLTU>;
+def : BccSwapPat<setule, BGEU>;
+
+// An extra pattern is needed for a brcond without a setcc (i.e. where the
+// condition was calculated elsewhere).
+def : Pat<(brcond GPR:$rj, bb:$imm21), (BNEZ GPR:$rj, bb:$imm21)>; + +let isBarrier = 1, isBranch = 1, isTerminator = 1 in +def PseudoBR : Pseudo<(outs), (ins simm26_lsl2:$imm26), [(br bb:$imm26)]>, + PseudoInstExpansion<(B simm26_lsl2:$imm26)>; + +let isBarrier = 1, isBranch = 1, isIndirectBranch = 1, isTerminator = 1 in +def PseudoBRIND : Pseudo<(outs), (ins GPR:$rj, simm16_lsl2:$imm16), []>, + PseudoInstExpansion<(JIRL R0, GPR:$rj, simm16_lsl2:$imm16)>; + +def : Pat<(brind GPR:$rj), (PseudoBRIND GPR:$rj, 0)>; +def : Pat<(brind (add GPR:$rj, simm16_lsl2:$imm16)), + (PseudoBRIND GPR:$rj, simm16_lsl2:$imm16)>; + let isBarrier = 1, isReturn = 1, isTerminator = 1 in def PseudoRET : Pseudo<(outs), (ins), [(loongarch_ret)]>, PseudoInstExpansion<(JIRL R0, R1, 0)>; diff --git a/llvm/lib/Target/LoongArch/LoongArchMCInstLower.cpp b/llvm/lib/Target/LoongArch/LoongArchMCInstLower.cpp --- a/llvm/lib/Target/LoongArch/LoongArchMCInstLower.cpp +++ b/llvm/lib/Target/LoongArch/LoongArchMCInstLower.cpp @@ -60,8 +60,10 @@ case MachineOperand::MO_GlobalAddress: MCOp = lowerSymbolOperand(MO, AP.getSymbolPreferLocal(*MO.getGlobal()), AP); break; - // TODO: lower special operands case MachineOperand::MO_MachineBasicBlock: + MCOp = lowerSymbolOperand(MO, MO.getMBB()->getSymbol(), AP); + break; + // TODO: lower special operands case MachineOperand::MO_BlockAddress: case MachineOperand::MO_ExternalSymbol: case MachineOperand::MO_ConstantPoolIndex: diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/br.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/br.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/ir-instruction/br.ll @@ -0,0 +1,358 @@ +; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefixes=ALL,LA32 +; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefixes=ALL,LA64 + +define void @foo() noreturn nounwind { +; ALL-LABEL: foo: +; ALL: # %bb.0: # %entry +; ALL-NEXT: .LBB0_1: # %loop +; ALL-NEXT: # =>This Inner Loop Header: Depth=1 +; 
ALL-NEXT: b .LBB0_1 +entry: + br label %loop +loop: + br label %loop +} + +define void @foo_br_eq(i32 %a, i32 *%b) nounwind { +; LA32-LABEL: foo_br_eq: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a1, 0 +; LA32-NEXT: beq $a2, $a0, .LBB1_2 +; LA32-NEXT: b .LBB1_1 +; LA32-NEXT: .LBB1_1: # %test +; LA32-NEXT: ld.w $a0, $a1, 0 +; LA32-NEXT: .LBB1_2: # %end +; LA32-NEXT: jirl $zero, $ra, 0 +; +; LA64-LABEL: foo_br_eq: +; LA64: # %bb.0: +; LA64-NEXT: ld.wu $a2, $a1, 0 +; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0 +; LA64-NEXT: beq $a2, $a0, .LBB1_2 +; LA64-NEXT: b .LBB1_1 +; LA64-NEXT: .LBB1_1: # %test +; LA64-NEXT: ld.w $a0, $a1, 0 +; LA64-NEXT: .LBB1_2: # %end +; LA64-NEXT: jirl $zero, $ra, 0 + %val = load volatile i32, i32* %b + %cc = icmp eq i32 %val, %a + br i1 %cc, label %end, label %test +test: + %tmp = load volatile i32, i32* %b + br label %end + +end: + ret void +} + +define void @foo_br_ne(i32 %a, i32 *%b) nounwind { +; LA32-LABEL: foo_br_ne: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a1, 0 +; LA32-NEXT: bne $a2, $a0, .LBB2_2 +; LA32-NEXT: b .LBB2_1 +; LA32-NEXT: .LBB2_1: # %test +; LA32-NEXT: ld.w $a0, $a1, 0 +; LA32-NEXT: .LBB2_2: # %end +; LA32-NEXT: jirl $zero, $ra, 0 +; +; LA64-LABEL: foo_br_ne: +; LA64: # %bb.0: +; LA64-NEXT: ld.wu $a2, $a1, 0 +; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0 +; LA64-NEXT: bne $a2, $a0, .LBB2_2 +; LA64-NEXT: b .LBB2_1 +; LA64-NEXT: .LBB2_1: # %test +; LA64-NEXT: ld.w $a0, $a1, 0 +; LA64-NEXT: .LBB2_2: # %end +; LA64-NEXT: jirl $zero, $ra, 0 + %val = load volatile i32, i32* %b + %cc = icmp ne i32 %val, %a + br i1 %cc, label %end, label %test +test: + %tmp = load volatile i32, i32* %b + br label %end + +end: + ret void +} + +define void @foo_br_slt(i32 %a, i32 *%b) nounwind { +; LA32-LABEL: foo_br_slt: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a1, 0 +; LA32-NEXT: blt $a2, $a0, .LBB3_2 +; LA32-NEXT: b .LBB3_1 +; LA32-NEXT: .LBB3_1: # %test +; LA32-NEXT: ld.w $a0, $a1, 0 +; LA32-NEXT: .LBB3_2: # %end +; LA32-NEXT: jirl $zero, $ra, 0 +; +; 
LA64-LABEL: foo_br_slt: +; LA64: # %bb.0: +; LA64-NEXT: ld.w $a2, $a1, 0 +; LA64-NEXT: addi.w $a0, $a0, 0 +; LA64-NEXT: blt $a2, $a0, .LBB3_2 +; LA64-NEXT: b .LBB3_1 +; LA64-NEXT: .LBB3_1: # %test +; LA64-NEXT: ld.w $a0, $a1, 0 +; LA64-NEXT: .LBB3_2: # %end +; LA64-NEXT: jirl $zero, $ra, 0 + %val = load volatile i32, i32* %b + %cc = icmp slt i32 %val, %a + br i1 %cc, label %end, label %test +test: + %tmp = load volatile i32, i32* %b + br label %end + +end: + ret void +} + +define void @foo_br_sge(i32 %a, i32 *%b) nounwind { +; LA32-LABEL: foo_br_sge: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a1, 0 +; LA32-NEXT: bge $a2, $a0, .LBB4_2 +; LA32-NEXT: b .LBB4_1 +; LA32-NEXT: .LBB4_1: # %test +; LA32-NEXT: ld.w $a0, $a1, 0 +; LA32-NEXT: .LBB4_2: # %end +; LA32-NEXT: jirl $zero, $ra, 0 +; +; LA64-LABEL: foo_br_sge: +; LA64: # %bb.0: +; LA64-NEXT: ld.w $a2, $a1, 0 +; LA64-NEXT: addi.w $a0, $a0, 0 +; LA64-NEXT: bge $a2, $a0, .LBB4_2 +; LA64-NEXT: b .LBB4_1 +; LA64-NEXT: .LBB4_1: # %test +; LA64-NEXT: ld.w $a0, $a1, 0 +; LA64-NEXT: .LBB4_2: # %end +; LA64-NEXT: jirl $zero, $ra, 0 + %val = load volatile i32, i32* %b + %cc = icmp sge i32 %val, %a + br i1 %cc, label %end, label %test +test: + %tmp = load volatile i32, i32* %b + br label %end + +end: + ret void +} + +define void @foo_br_ult(i32 %a, i32 *%b) nounwind { +; LA32-LABEL: foo_br_ult: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a1, 0 +; LA32-NEXT: bltu $a2, $a0, .LBB5_2 +; LA32-NEXT: b .LBB5_1 +; LA32-NEXT: .LBB5_1: # %test +; LA32-NEXT: ld.w $a0, $a1, 0 +; LA32-NEXT: .LBB5_2: # %end +; LA32-NEXT: jirl $zero, $ra, 0 +; +; LA64-LABEL: foo_br_ult: +; LA64: # %bb.0: +; LA64-NEXT: ld.wu $a2, $a1, 0 +; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0 +; LA64-NEXT: bltu $a2, $a0, .LBB5_2 +; LA64-NEXT: b .LBB5_1 +; LA64-NEXT: .LBB5_1: # %test +; LA64-NEXT: ld.w $a0, $a1, 0 +; LA64-NEXT: .LBB5_2: # %end +; LA64-NEXT: jirl $zero, $ra, 0 + %val = load volatile i32, i32* %b + %cc = icmp ult i32 %val, %a + br i1 %cc, label %end, label %test 
+test: + %tmp = load volatile i32, i32* %b + br label %end + +end: + ret void +} + +define void @foo_br_uge(i32 %a, i32 *%b) nounwind { +; LA32-LABEL: foo_br_uge: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a1, 0 +; LA32-NEXT: bgeu $a2, $a0, .LBB6_2 +; LA32-NEXT: b .LBB6_1 +; LA32-NEXT: .LBB6_1: # %test +; LA32-NEXT: ld.w $a0, $a1, 0 +; LA32-NEXT: .LBB6_2: # %end +; LA32-NEXT: jirl $zero, $ra, 0 +; +; LA64-LABEL: foo_br_uge: +; LA64: # %bb.0: +; LA64-NEXT: ld.wu $a2, $a1, 0 +; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0 +; LA64-NEXT: bgeu $a2, $a0, .LBB6_2 +; LA64-NEXT: b .LBB6_1 +; LA64-NEXT: .LBB6_1: # %test +; LA64-NEXT: ld.w $a0, $a1, 0 +; LA64-NEXT: .LBB6_2: # %end +; LA64-NEXT: jirl $zero, $ra, 0 + %val = load volatile i32, i32* %b + %cc = icmp uge i32 %val, %a + br i1 %cc, label %end, label %test +test: + %tmp = load volatile i32, i32* %b + br label %end + +end: + ret void +} + +;; Check for condition codes that don't have a matching instruction. +define void @foo_br_sgt(i32 %a, i32 *%b) nounwind { +; LA32-LABEL: foo_br_sgt: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a1, 0 +; LA32-NEXT: blt $a0, $a2, .LBB7_2 +; LA32-NEXT: b .LBB7_1 +; LA32-NEXT: .LBB7_1: # %test +; LA32-NEXT: ld.w $a0, $a1, 0 +; LA32-NEXT: .LBB7_2: # %end +; LA32-NEXT: jirl $zero, $ra, 0 +; +; LA64-LABEL: foo_br_sgt: +; LA64: # %bb.0: +; LA64-NEXT: ld.w $a2, $a1, 0 +; LA64-NEXT: addi.w $a0, $a0, 0 +; LA64-NEXT: blt $a0, $a2, .LBB7_2 +; LA64-NEXT: b .LBB7_1 +; LA64-NEXT: .LBB7_1: # %test +; LA64-NEXT: ld.w $a0, $a1, 0 +; LA64-NEXT: .LBB7_2: # %end +; LA64-NEXT: jirl $zero, $ra, 0 + %val = load volatile i32, i32* %b + %cc = icmp sgt i32 %val, %a + br i1 %cc, label %end, label %test +test: + %tmp = load volatile i32, i32* %b + br label %end + +end: + ret void +} + +define void @foo_br_sle(i32 %a, i32 *%b) nounwind { +; LA32-LABEL: foo_br_sle: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a1, 0 +; LA32-NEXT: bge $a0, $a2, .LBB8_2 +; LA32-NEXT: b .LBB8_1 +; LA32-NEXT: .LBB8_1: # %test +; LA32-NEXT: ld.w 
$a0, $a1, 0 +; LA32-NEXT: .LBB8_2: # %end +; LA32-NEXT: jirl $zero, $ra, 0 +; +; LA64-LABEL: foo_br_sle: +; LA64: # %bb.0: +; LA64-NEXT: ld.w $a2, $a1, 0 +; LA64-NEXT: addi.w $a0, $a0, 0 +; LA64-NEXT: bge $a0, $a2, .LBB8_2 +; LA64-NEXT: b .LBB8_1 +; LA64-NEXT: .LBB8_1: # %test +; LA64-NEXT: ld.w $a0, $a1, 0 +; LA64-NEXT: .LBB8_2: # %end +; LA64-NEXT: jirl $zero, $ra, 0 + %val = load volatile i32, i32* %b + %cc = icmp sle i32 %val, %a + br i1 %cc, label %end, label %test +test: + %tmp = load volatile i32, i32* %b + br label %end + +end: + ret void +} + +define void @foo_br_ugt(i32 %a, i32 *%b) nounwind { +; LA32-LABEL: foo_br_ugt: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a1, 0 +; LA32-NEXT: bltu $a0, $a2, .LBB9_2 +; LA32-NEXT: b .LBB9_1 +; LA32-NEXT: .LBB9_1: # %test +; LA32-NEXT: ld.w $a0, $a1, 0 +; LA32-NEXT: .LBB9_2: # %end +; LA32-NEXT: jirl $zero, $ra, 0 +; +; LA64-LABEL: foo_br_ugt: +; LA64: # %bb.0: +; LA64-NEXT: ld.wu $a2, $a1, 0 +; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0 +; LA64-NEXT: bltu $a0, $a2, .LBB9_2 +; LA64-NEXT: b .LBB9_1 +; LA64-NEXT: .LBB9_1: # %test +; LA64-NEXT: ld.w $a0, $a1, 0 +; LA64-NEXT: .LBB9_2: # %end +; LA64-NEXT: jirl $zero, $ra, 0 + %val = load volatile i32, i32* %b + %cc = icmp ugt i32 %val, %a + br i1 %cc, label %end, label %test +test: + %tmp = load volatile i32, i32* %b + br label %end + +end: + ret void +} + +define void @foo_br_ule(i32 %a, i32 *%b) nounwind { +; LA32-LABEL: foo_br_ule: +; LA32: # %bb.0: +; LA32-NEXT: ld.w $a2, $a1, 0 +; LA32-NEXT: bgeu $a0, $a2, .LBB10_2 +; LA32-NEXT: b .LBB10_1 +; LA32-NEXT: .LBB10_1: # %test +; LA32-NEXT: ld.w $a0, $a1, 0 +; LA32-NEXT: .LBB10_2: # %end +; LA32-NEXT: jirl $zero, $ra, 0 +; +; LA64-LABEL: foo_br_ule: +; LA64: # %bb.0: +; LA64-NEXT: ld.wu $a2, $a1, 0 +; LA64-NEXT: bstrpick.d $a0, $a0, 31, 0 +; LA64-NEXT: bgeu $a0, $a2, .LBB10_2 +; LA64-NEXT: b .LBB10_1 +; LA64-NEXT: .LBB10_1: # %test +; LA64-NEXT: ld.w $a0, $a1, 0 +; LA64-NEXT: .LBB10_2: # %end +; LA64-NEXT: jirl $zero, $ra, 0 + 
%val = load volatile i32, i32* %b + %cc = icmp ule i32 %val, %a + br i1 %cc, label %end, label %test +test: + %tmp = load volatile i32, i32* %b + br label %end + +end: + ret void +} + +;; Check the case of a branch where the condition was generated in another +;; function. +define void @foo_br_cc(i32 *%a, i1 %cc) nounwind { +; ALL-LABEL: foo_br_cc: +; ALL: # %bb.0: +; ALL-NEXT: ld.w $a2, $a0, 0 +; ALL-NEXT: andi $a1, $a1, 1 +; ALL-NEXT: bnez $a1, .LBB11_2 +; ALL-NEXT: b .LBB11_1 +; ALL-NEXT: .LBB11_1: # %test +; ALL-NEXT: ld.w $a0, $a0, 0 +; ALL-NEXT: .LBB11_2: # %end +; ALL-NEXT: jirl $zero, $ra, 0 + %val = load volatile i32, i32* %a + br i1 %cc, label %end, label %test +test: + %tmp = load volatile i32, i32* %a + br label %end + +end: + ret void +} diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/indirectbr.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/indirectbr.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/LoongArch/ir-instruction/indirectbr.ll @@ -0,0 +1,30 @@ +; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s + +define i32 @indirectbr(i8* %target) nounwind { +; CHECK-LABEL: indirectbr: +; CHECK: # %bb.0: +; CHECK-NEXT: jirl $zero, $a0, 0 +; CHECK-NEXT: .LBB0_1: # %test_label +; CHECK-NEXT: move $a0, $zero +; CHECK-NEXT: jirl $zero, $ra, 0 + indirectbr i8* %target, [label %test_label] +test_label: + br label %ret +ret: + ret i32 0 +} + +define i32 @indirectbr_with_offset(i8* %a) nounwind { +; CHECK-LABEL: indirectbr_with_offset: +; CHECK: # %bb.0: +; CHECK-NEXT: jirl $zero, $a0, 1380 +; CHECK-NEXT: .LBB1_1: # %test_label +; CHECK-NEXT: move $a0, $zero +; CHECK-NEXT: jirl $zero, $ra, 0 + %target = getelementptr inbounds i8, i8* %a, i32 1380 + indirectbr i8* %target, [label %test_label] +test_label: + br label %ret +ret: + ret i32 0 +}