Index: lib/Target/RISCV/RISCV.h
===================================================================
--- lib/Target/RISCV/RISCV.h
+++ lib/Target/RISCV/RISCV.h
@@ -15,15 +15,20 @@
 #ifndef LLVM_LIB_TARGET_RISCV_RISCV_H
 #define LLVM_LIB_TARGET_RISCV_RISCV_H
 
-#include "MCTargetDesc/RISCVMCTargetDesc.h"
 #include "llvm/Target/TargetMachine.h"
 
 namespace llvm {
 class RISCVTargetMachine;
-class MCInst;
+class AsmPrinter;
 class MachineInstr;
+class MachineOperand;
+class MCInst;
+class MCOperand;
 
-void LowerRISCVMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI);
+void LowerRISCVMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
+                                    const AsmPrinter &AP);
+bool LowerRISCVMachineOperandToMCOperand(const MachineOperand &MO,
+                                         MCOperand &MCOp, const AsmPrinter &AP);
 
 FunctionPass *createRISCVISelDag(RISCVTargetMachine &TM);
 }
Index: lib/Target/RISCV/RISCVAsmPrinter.cpp
===================================================================
--- lib/Target/RISCV/RISCVAsmPrinter.cpp
+++ lib/Target/RISCV/RISCVAsmPrinter.cpp
@@ -43,6 +43,11 @@
   bool emitPseudoExpansionLowering(MCStreamer &OutStreamer,
                                    const MachineInstr *MI);
+
+  // Wrapper needed for tblgenned pseudo lowering.
+  bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
+    return LowerRISCVMachineOperandToMCOperand(MO, MCOp, *this);
+  }
 };
 }
 
@@ -56,7 +61,7 @@
     return;
 
   MCInst TmpInst;
-  LowerRISCVMachineInstrToMCInst(MI, TmpInst);
+  LowerRISCVMachineInstrToMCInst(MI, TmpInst, *this);
   EmitToStreamer(*OutStreamer, TmpInst);
 }
Index: lib/Target/RISCV/RISCVISelLowering.h
===================================================================
--- lib/Target/RISCV/RISCVISelLowering.h
+++ lib/Target/RISCV/RISCVISelLowering.h
@@ -29,6 +29,8 @@
 }
 
 class RISCVTargetLowering : public TargetLowering {
+  const RISCVSubtarget &Subtarget;
+
 public:
   explicit RISCVTargetLowering(const TargetMachine &TM,
                                const RISCVSubtarget &STI);
@@ -54,6 +56,7 @@
                              Type *Ty) const override {
     return true;
   }
+  SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
 };
 }
Index: lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- lib/Target/RISCV/RISCVISelLowering.cpp
+++ lib/Target/RISCV/RISCVISelLowering.cpp
@@ -13,7 +13,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "RISCVISelLowering.h"
-#include "RISCV.h"
+#include "MCTargetDesc/RISCVBaseInfo.h"
 #include "RISCVRegisterInfo.h"
 #include "RISCVSubtarget.h"
 #include "RISCVTargetMachine.h"
@@ -37,7 +37,7 @@
 
 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                          const RISCVSubtarget &STI)
-    : TargetLowering(TM) {
+    : TargetLowering(TM), Subtarget(STI) {
 
   // Set up the register classes.
   addRegisterClass(MVT::i32, &RISCV::GPRRegClass);
@@ -47,10 +47,17 @@
 
   setStackPointerRegisterToSaveRestore(RISCV::X2_32);
 
+  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
+    setLoadExtAction(N, MVT::i32, MVT::i1, Promote);
+
   // TODO: add all necessary setOperationAction calls.
+  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+
+  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
 
   setBooleanContents(ZeroOrOneBooleanContent);
 
+  // Function alignments (log2).
   setMinFunctionAlignment(3);
   setPrefFunctionAlignment(3);
@@ -59,11 +66,35 @@
 
 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
   switch (Op.getOpcode()) {
+  case ISD::GlobalAddress:
+    return lowerGlobalAddress(Op, DAG);
   default:
     report_fatal_error("unimplemented operand");
   }
 }
 
+SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  EVT Ty = Op.getValueType();
+  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
+  const GlobalValue *GV = N->getGlobal();
+  int64_t Offset = N->getOffset();
+
+  if (!isPositionIndependent() && !Subtarget.is64Bit()) {
+    SDValue GAHi =
+        DAG.getTargetGlobalAddress(GV, DL, Ty, Offset, RISCVII::MO_HI);
+    SDValue GALo =
+        DAG.getTargetGlobalAddress(GV, DL, Ty, Offset, RISCVII::MO_LO);
+    SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
+    SDValue MNLo =
+        SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
+    return MNLo;
+  } else {
+    report_fatal_error("Unable to lowerGlobalAddress");
+  }
+}
+
 // Calling Convention Implementation.
 #include "RISCVGenCallingConv.inc"
Index: lib/Target/RISCV/RISCVInstrInfo.h
===================================================================
--- lib/Target/RISCV/RISCVInstrInfo.h
+++ lib/Target/RISCV/RISCVInstrInfo.h
@@ -29,6 +29,10 @@
   RISCVInstrInfo();
 
   const RISCVRegisterInfo &getRegisterInfo() const { return RI; }
+
+  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator Position,
+                   const DebugLoc &DL, unsigned DestinationRegister,
+                   unsigned SourceRegister, bool KillSource) const override;
 };
 }
Index: lib/Target/RISCV/RISCVInstrInfo.cpp
===================================================================
--- lib/Target/RISCV/RISCVInstrInfo.cpp
+++ lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -29,3 +29,18 @@
 using namespace llvm;
 
 RISCVInstrInfo::RISCVInstrInfo() : RISCVGenInstrInfo() {}
+
+void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+                                 MachineBasicBlock::iterator Position,
+                                 const DebugLoc &DL,
+                                 unsigned DestinationRegister,
+                                 unsigned SourceRegister,
+                                 bool KillSource) const {
+  if (!RISCV::GPRRegClass.contains(DestinationRegister, SourceRegister)) {
+    llvm_unreachable("Impossible reg-to-reg copy");
+  }
+
+  BuildMI(MBB, Position, DL, get(RISCV::ADDI), DestinationRegister)
+      .addReg(SourceRegister, getKillRegState(KillSource))
+      .addImm(0);
+}
Index: lib/Target/RISCV/RISCVInstrInfo.td
===================================================================
--- lib/Target/RISCV/RISCVInstrInfo.td
+++ lib/Target/RISCV/RISCVInstrInfo.td
@@ -13,7 +13,6 @@
 
 include "RISCVInstrFormats.td"
 
-// Target-independent nodes
 def RetFlag : SDNode<"RISCVISD::RET_FLAG", SDTNone,
                      [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
 
@@ -80,6 +79,22 @@
   let DecoderMethod = "decodeSImmOperandAndLsl1<21>";
 }
 
+// Extract least significant 12 bits from an immediate value and sign extend
+// them.
+def LO12Sext : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(SignExtend64<12>(N->getZExtValue()),
+                                   SDLoc(N), MVT::i32);
+}]>;
+
+// Extract the most significant 20 bits from an immediate value. Add 1 if bit
+// 11 is 1, to compensate for the low 12 bits in the matching immediate addi
+// or ld/st being negative.
+def HI20 : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(((N->getZExtValue()+0x800) >> 12) & 0xfffff,
+                                   SDLoc(N), MVT::i32);
+}]>;
+
+
 // As noted in RISCVRegisterInfo.td, the hope is that support for
 // variable-sized register classes will mean that instruction definitions do
 // not need to be duplicated for 32-bit and 64-bit register classes. For now
@@ -125,9 +140,13 @@
 def LBU : LD_ri<0b100, "lbu">;
 def LHU : LD_ri<0b101, "lhu">;
 
+// Operands for stores are in the order srcreg, base, offset rather than
+// reflecting the order these fields are specified in the instruction
+// encoding.
+
 let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
 class ST_ri<bits<3> funct3, string OpcodeStr> :
-      FS<funct3, 0b0100011, (outs), (ins GPR:$rs1, GPR:$rs2, simm12:$imm12),
+      FS<funct3, 0b0100011, (outs), (ins GPR:$rs2, GPR:$rs1, simm12:$imm12),
          OpcodeStr, "$rs2, ${imm12}(${rs1})">;
 
 def SB : ST_ri<0b000, "sb">;
@@ -232,6 +251,11 @@
 class PatGprUimm5<SDPatternOperator OpNode, FI32Shift Inst> :
       Pat<(OpNode GPR:$rs1, uimm5:$shamt), (Inst GPR:$rs1, uimm5:$shamt)>;
 
+/// Immediates
+
+def : Pat<(simm12:$imm), (ADDI X0_32, simm12:$imm)>;
+def : Pat<(i32 imm:$imm), (ADDI (LUI (HI20 imm:$imm)), (LO12Sext imm:$imm))>;
+
 /// Simple arithmetic operations
 
 def : PatGprGpr<add, ADD>;
@@ -262,3 +286,31 @@
 let isBarrier = 1, isReturn = 1, isTerminator = 1 in
 def PseudoRET : Pseudo<(outs), (ins), [(RetFlag)]>,
                 PseudoInstExpansion<(JALR X0_32, X1_32, 0)>;
+
+/// Loads
+
+multiclass LdPat<PatFrag LoadOp, RVInst Inst> {
+  def : Pat<(LoadOp GPR:$rs1), (Inst GPR:$rs1, 0)>;
+  def : Pat<(LoadOp (add GPR:$rs1, simm12:$imm12)),
+            (Inst GPR:$rs1, simm12:$imm12)>;
+}
+
+defm : LdPat<sextloadi8, LB>;
+defm : LdPat<extloadi8, LB>;
+defm : LdPat<sextloadi16, LH>;
+defm : LdPat<extloadi16, LH>;
+defm : LdPat<load, LW>;
+defm : LdPat<zextloadi8, LBU>;
+defm : LdPat<zextloadi16, LHU>;
+
+/// Stores
+
+multiclass StPat<PatFrag StoreOp, RVInst Inst> {
+  def : Pat<(StoreOp GPR:$rs2, GPR:$rs1), (Inst GPR:$rs2, GPR:$rs1, 0)>;
+  def : Pat<(StoreOp GPR:$rs2, (add GPR:$rs1, simm12:$imm12)),
+            (Inst GPR:$rs2, GPR:$rs1, simm12:$imm12)>;
+}
+
+defm : StPat<truncstorei8, SB>;
+defm : StPat<truncstorei16, SH>;
+defm : StPat<store, SW>;
Index: lib/Target/RISCV/RISCVMCInstLower.cpp
===================================================================
--- lib/Target/RISCV/RISCVMCInstLower.cpp
+++ lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -13,40 +13,75 @@
 //===----------------------------------------------------------------------===//
 
 #include "RISCV.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
-#include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
+#include "MCTargetDesc/RISCVBaseInfo.h"
+#include "MCTargetDesc/RISCVMCExpr.h"
+#include "llvm/CodeGen/AsmPrinter.h"
 
 using namespace llvm;
 
-void llvm::LowerRISCVMachineInstrToMCInst(const MachineInstr *MI,
-                                          MCInst &OutMI) {
-  OutMI.setOpcode(MI->getOpcode());
+static MCOperand lowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym,
+                                    const AsmPrinter &AP) {
+  MCContext &Ctx = AP.OutContext;
+  RISCVMCExpr::VariantKind Kind;
+
+  switch (MO.getTargetFlags()) {
+  case RISCVII::MO_None:
+    Kind = RISCVMCExpr::VK_RISCV_None;
+    break;
+  case RISCVII::MO_LO:
+    Kind = RISCVMCExpr::VK_RISCV_LO;
+    break;
+  case RISCVII::MO_HI:
+    Kind = RISCVMCExpr::VK_RISCV_HI;
+    break;
+  default:
+    llvm_unreachable("Unknown target flag on GV operand");
+  }
 
-  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
-    const MachineOperand &MO = MI->getOperand(i);
+  const MCExpr *ME =
+      MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, Ctx);
+
+  if (!MO.isJTI() && MO.getOffset())
+    ME = MCBinaryExpr::createAdd(
+        ME, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);
+
+  ME = RISCVMCExpr::create(ME, Kind, Ctx);
+  return MCOperand::createExpr(ME);
+}
+
+bool llvm::LowerRISCVMachineOperandToMCOperand(const MachineOperand &MO,
+                                               MCOperand &MCOp,
+                                               const AsmPrinter &AP) {
+  switch (MO.getType()) {
+  default:
+    report_fatal_error("LowerRISCVMachineInstrToMCInst: unknown operand type");
+  case MachineOperand::MO_Register:
+    // Ignore all implicit register operands.
+    if (MO.isImplicit())
+      return false;
+    MCOp = MCOperand::createReg(MO.getReg());
+    break;
+  case MachineOperand::MO_Immediate:
+    MCOp = MCOperand::createImm(MO.getImm());
+    break;
+  case MachineOperand::MO_MachineBasicBlock:
+    MCOp = MCOperand::createExpr(
+        MCSymbolRefExpr::create(MO.getMBB()->getSymbol(), AP.OutContext));
+    break;
+  case MachineOperand::MO_GlobalAddress:
+    MCOp = lowerSymbolOperand(MO, AP.getSymbol(MO.getGlobal()), AP);
+    break;
+  }
+  return true;
+}
+
+void llvm::LowerRISCVMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
+                                          const AsmPrinter &AP) {
+  OutMI.setOpcode(MI->getOpcode());
+  for (const MachineOperand &MO : MI->operands()) {
     MCOperand MCOp;
-    switch (MO.getType()) {
-    default:
-      report_fatal_error(
-          "LowerRISCVMachineInstrToMCInst: unknown operand type");
-    case MachineOperand::MO_Register:
-      // Ignore all implicit register operands.
-      if (MO.isImplicit())
-        continue;
-      MCOp = MCOperand::createReg(MO.getReg());
-      break;
-    case MachineOperand::MO_Immediate:
-      MCOp = MCOperand::createImm(MO.getImm());
-      break;
-    }
-
-    OutMI.addOperand(MCOp);
+    if (LowerRISCVMachineOperandToMCOperand(MO, MCOp, AP))
+      OutMI.addOperand(MCOp);
   }
 }
Index: lib/Target/RISCV/RISCVRegisterInfo.cpp
===================================================================
--- lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -12,7 +12,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "RISCVRegisterInfo.h"
-#include "RISCV.h"
+#include "MCTargetDesc/RISCVBaseInfo.h"
 #include "RISCVSubtarget.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
Index: lib/Target/RISCV/RISCVSubtarget.h
===================================================================
--- lib/Target/RISCV/RISCVSubtarget.h
+++ lib/Target/RISCV/RISCVSubtarget.h
@@ -58,6 +58,7 @@
   const SelectionDAGTargetInfo *getSelectionDAGInfo() const override {
     return &TSInfo;
   }
+  bool is64Bit() const { return HasRV64; }
 };
 } // End llvm namespace
Index: lib/Target/RISCV/RISCVSubtarget.cpp
===================================================================
--- lib/Target/RISCV/RISCVSubtarget.cpp
+++ lib/Target/RISCV/RISCVSubtarget.cpp
@@ -12,7 +12,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "RISCVSubtarget.h"
-#include "RISCV.h"
+#include "MCTargetDesc/RISCVBaseInfo.h"
 #include "RISCVFrameLowering.h"
 #include "llvm/Support/TargetRegistry.h"
 
Index: test/CodeGen/RISCV/alu.ll
===================================================================
--- test/CodeGen/RISCV/alu.ll
+++ test/CodeGen/RISCV/alu.ll
@@ -160,3 +160,42 @@
   %1 = and i32 %a, %b
   ret i32 %1
 }
+
+; Materialize constants
+
+define i32 @zero() {
+; CHECK-LABEL: zero:
+; CHECK: addi a0, zero, 0
+; CHECK: jalr zero, ra, 0
+  ret i32 0
+}
+
+define i32 @pos_small() {
+; CHECK-LABEL: pos_small:
+; CHECK: addi a0, zero, 2047
+; CHECK: jalr zero, ra, 0
+  ret i32 2047
+}
+
+define i32 @neg_small() {
+; CHECK-LABEL: neg_small:
+; CHECK: addi a0, zero, -2048
+; CHECK: jalr zero, ra, 0
+  ret i32 -2048
+}
+
+define i32 @pos_i32() {
+; CHECK-LABEL: pos_i32:
+; CHECK: lui [[REG:[a-z0-9]+]], 423811
+; CHECK: addi a0, [[REG]], -1297
+; CHECK: jalr zero, ra, 0
+  ret i32 1735928559
+}
+
+define i32 @neg_i32() {
+; CHECK-LABEL: neg_i32:
+; CHECK: lui [[REG:[a-z0-9]+]], 912092
+; CHECK: addi a0, [[REG]], -273
+; CHECK: jalr zero, ra, 0
+  ret i32 -559038737
+}
Index: test/CodeGen/RISCV/mem.ll
===================================================================
--- /dev/null
+++ test/CodeGen/RISCV/mem.ll
@@ -0,0 +1,174 @@
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s
+
+; Check indexed and unindexed, sext, zext and anyext loads
+
+define i32 @lb(i8 *%a) nounwind {
+; CHECK-LABEL: lb:
+; CHECK: lb {{[a-z0-9]+}}, 0(a0)
+; CHECK: lb {{[a-z0-9]+}}, 1(a0)
+  %1 = getelementptr i8, i8* %a, i32 1
+  %2 = load i8, i8* %1
+  %3 = sext i8 %2 to i32
+  ; the unused load will produce an anyext for selection
+  %4 = load volatile i8, i8* %a
+  ret i32 %3
+}
+
+define i32 @lh(i16 *%a) nounwind {
+; CHECK-LABEL: lh:
+; CHECK: lh {{[a-z0-9]+}}, 0(a0)
+; CHECK: lh {{[a-z0-9]+}}, 4(a0)
+  %1 = getelementptr i16, i16* %a, i32 2
+  %2 = load i16, i16* %1
+  %3 = sext i16 %2 to i32
+  ; the unused load will produce an anyext for selection
+  %4 = load volatile i16, i16* %a
+  ret i32 %3
+}
+
+define i32 @lw(i32 *%a) nounwind {
+; CHECK-LABEL: lw:
+; CHECK: lw {{[a-z0-9]+}}, 0(a0)
+; CHECK: lw {{[a-z0-9]+}}, 12(a0)
+  %1 = getelementptr i32, i32* %a, i32 3
+  %2 = load i32, i32* %1
+  %3 = load volatile i32, i32* %a
+  ret i32 %2
+}
+
+define i32 @lbu(i8 *%a) nounwind {
+; CHECK-LABEL: lbu:
+; CHECK: lbu {{[a-z0-9]+}}, 0(a0)
+; CHECK: lbu {{[a-z0-9]+}}, 4(a0)
+  %1 = getelementptr i8, i8* %a, i32 4
+  %2 = load i8, i8* %1
+  %3 = zext i8 %2 to i32
+  %4 = load volatile i8, i8* %a
+  %5 = zext i8 %4 to i32
+  %6 = add i32 %3, %5
+  ret i32 %6
+}
+
+define i32 @lhu(i16 *%a) nounwind {
+; CHECK-LABEL: lhu:
+; CHECK: lhu {{[a-z0-9]+}}, 0(a0)
+; CHECK: lhu {{[a-z0-9]+}}, 10(a0)
+  %1 = getelementptr i16, i16* %a, i32 5
+  %2 = load i16, i16* %1
+  %3 = zext i16 %2 to i32
+  %4 = load volatile i16, i16* %a
+  %5 = zext i16 %4 to i32
+  %6 = add i32 %3, %5
+  ret i32 %6
+}
+
+; Check indexed and unindexed stores
+
+define void @sb(i8 *%a, i8 %b) nounwind {
+; CHECK-LABEL: sb:
+; CHECK: sb a1, 6(a0)
+; CHECK: sb a1, 0(a0)
+  store i8 %b, i8* %a
+  %1 = getelementptr i8, i8* %a, i32 6
+  store i8 %b, i8* %1
+  ret void
+}
+
+define void @sh(i16 *%a, i16 %b) nounwind {
+; CHECK-LABEL: sh:
+; CHECK: sh a1, 14(a0)
+; CHECK: sh a1, 0(a0)
+  store i16 %b, i16* %a
+  %1 = getelementptr i16, i16* %a, i32 7
+  store i16 %b, i16* %1
+  ret void
+}
+
+define void @sw(i32 *%a, i32 %b) nounwind {
+; CHECK-LABEL: sw:
+; CHECK: sw a1, 32(a0)
+; CHECK: sw a1, 0(a0)
+  store i32 %b, i32* %a
+  %1 = getelementptr i32, i32* %a, i32 8
+  store i32 %b, i32* %1
+  ret void
+}
+
+; Check load and store to an i1 location
+define i32 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
+; CHECK-LABEL: load_sext_zext_anyext_i1:
+; CHECK: lb a1, 0(a0)
+; CHECK: lbu a1, 1(a0)
+; CHECK: lbu a0, 2(a0)
+; CHECK: sub a0, a0, a1
+; CHECK: jalr zero, ra, 0
+  ; sextload i1
+  %1 = getelementptr i1, i1* %a, i32 1
+  %2 = load i1, i1* %1
+  %3 = sext i1 %2 to i32
+  ; zextload i1
+  %4 = getelementptr i1, i1* %a, i32 2
+  %5 = load i1, i1* %4
+  %6 = zext i1 %5 to i32
+  %7 = add i32 %3, %6
+  ; extload i1 (anyext). Produced as the load is unused.
+  %8 = load volatile i1, i1* %a
+  ret i32 %7
+}
+
+define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
+; CHECK-LABEL: load_sext_zext_anyext_i1_i16:
+; CHECK: lb a1, 0(a0)
+; CHECK: lbu a1, 1(a0)
+; CHECK: lbu a0, 2(a0)
+; CHECK: sub a0, a0, a1
+; CHECK: jalr zero, ra, 0
+  ; sextload i1
+  %1 = getelementptr i1, i1* %a, i32 1
+  %2 = load i1, i1* %1
+  %3 = sext i1 %2 to i16
+  ; zextload i1
+  %4 = getelementptr i1, i1* %a, i32 2
+  %5 = load i1, i1* %4
+  %6 = zext i1 %5 to i16
+  %7 = add i16 %3, %6
+  ; extload i1 (anyext). Produced as the load is unused.
+  %8 = load volatile i1, i1* %a
+  ret i16 %7
+}
+
+; Check load and store to a global
+@G = global i32 0
+
+define i32 @lw_sw_global(i32 %a) nounwind {
+; TODO: the addi should be folded into the lw/sw operations
+; CHECK-LABEL: lw_sw_global:
+; CHECK: lui {{[a-z0-9]+}}, %hi(G)
+; CHECK: addi {{[a-z0-9]+}}, {{[a-z0-9]+}}, %lo(G)
+; CHECK: lw {{[a-z0-9]+}}, 0(
+; CHECK: sw a0, 0(
+; CHECK: lui {{[a-z0-9]+}}, %hi(G+36)
+; CHECK: addi {{[a-z0-9]+}}, {{[a-z0-9]+}}, %lo(G+36)
+; CHECK: lw {{[a-z0-9]+}}, 0(
+; CHECK: sw a0, 0(
+  %1 = load volatile i32, i32* @G
+  store i32 %a, i32* @G
+  %2 = getelementptr i32, i32* @G, i32 9
+  %3 = load volatile i32, i32* %2
+  store i32 %a, i32* %2
+  ret i32 %1
+}
+
+; Ensure that 1 is added to the high 20 bits if bit 11 of the low part is 1
+define i32 @lw_sw_constant(i32 %a) nounwind {
+; TODO: the addi should be folded into the lw/sw
+; CHECK-LABEL: lw_sw_constant:
+; CHECK: lui {{[a-z0-9]+}}, 912092
+; CHECK: addi {{[a-z0-9]+}}, {{[a-z0-9]+}}, -273
+; CHECK: lw {{[a-z0-9]+}}, 0(
+; CHECK: sw a0, 0(
+  %1 = inttoptr i32 3735928559 to i32*
+  %2 = load volatile i32, i32* %1
+  store i32 %a, i32* %1
+  ret i32 %2
+}
Index: test/CodeGen/RISCV/wide-mem.ll
===================================================================
--- /dev/null
+++ test/CodeGen/RISCV/wide-mem.ll
@@ -0,0 +1,25 @@
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s
+
+; Check load/store operations on values wider than what is natively supported
+
+define i64 @load_i64(i64 *%a) nounwind {
+; CHECK-LABEL: load_i64:
+; CHECK: lw {{[a-z0-9]+}}, 0(a0)
+; CHECK: lw {{[a-z0-9]+}}, 4(a0)
+  %1 = load i64, i64* %a
+  ret i64 %1
+}
+
+@val64 = local_unnamed_addr global i64 2863311530, align 8
+
+; TODO: codegen on this should be improved. It shouldn't be necessary to
+; generate two addi
+define i64 @load_i64_global() nounwind {
+; CHECK-LABEL: load_i64_global:
+; CHECK: addi a0, a0, %lo(val64)
+; CHECK: lw a0, 0(a0)
+; CHECK: addi a1, a1, %lo(val64+4)
+; CHECK: lw a1, 0(a1)
+  %1 = load i64, i64* @val64
+  ret i64 %1
+}
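
Note (not part of the patch): the HI20/LO12Sext transforms and the %hi/%lo operand flags used by lowerGlobalAddress rely on the same split — lui supplies the upper 20 bits rounded up by 0x800, and the paired addi/lw/sw supplies a sign-extended low 12 bits. The standalone C++ sketch below mirrors that arithmetic so the lui/addi immediates expected in alu.ll and mem.ll can be checked by hand; hi20, lo12Sext and materialize are illustrative helper names, not symbols from the patch.

// Sketch mirroring the HI20 and LO12Sext SDNodeXForms above.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Upper 20 bits, rounded up by 0x800 so that the negative sign-extended low
// part of the paired addi/lw/sw brings the value back down to the target.
static uint32_t hi20(uint32_t Val) { return ((Val + 0x800) >> 12) & 0xfffff; }

// Low 12 bits, sign extended (the I/S-type immediate is signed).
static int32_t lo12Sext(uint32_t Val) {
  int32_t Lo = Val & 0xfff;
  return Lo >= 0x800 ? Lo - 0x1000 : Lo;
}

int main() {
  // lui places hi20 << 12 in the register; adding the sign-extended low part
  // reassembles the original 32-bit value (modulo 2^32).
  auto materialize = [](uint32_t Val) {
    return (hi20(Val) << 12) + static_cast<uint32_t>(lo12Sext(Val));
  };

  // pos_i32 in alu.ll: 1735928559 -> lui 423811 / addi -1297.
  assert(hi20(1735928559u) == 423811 && lo12Sext(1735928559u) == -1297);
  assert(materialize(1735928559u) == 1735928559u);

  // neg_i32 in alu.ll and lw_sw_constant in mem.ll: 0xdeadbeef -> lui 912092 /
  // addi -273 (bit 11 of the low part is set, so the high part is rounded up).
  assert(hi20(0xdeadbeefu) == 912092 && lo12Sext(0xdeadbeefu) == -273);
  assert(materialize(0xdeadbeefu) == 0xdeadbeefu);

  std::printf("hi20/lo12 split checks passed\n");
  return 0;
}

The %hi(sym)/%lo(sym) pairs checked in lw_sw_global apply the same rounding to a symbol address at link time, which is why lowerGlobalAddress can emit an unconditional LUI/ADDI pair.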