Index: lib/Target/Mips/MipsFastISel.cpp
===================================================================
--- lib/Target/Mips/MipsFastISel.cpp
+++ lib/Target/Mips/MipsFastISel.cpp
@@ -4,6 +4,7 @@
 #include "llvm/CodeGen/FunctionLoweringInfo.h"
 #include "llvm/CodeGen/FastISel.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
 #include "llvm/IR/GlobalAlias.h"
 #include "llvm/IR/GlobalVariable.h"
 #include "llvm/Target/TargetInstrInfo.h"
@@ -12,6 +13,7 @@
 #include "MipsISelLowering.h"
 #include "MipsMachineFunction.h"
 #include "MipsSubtarget.h"
+#include "MipsTargetMachine.h"
 
 using namespace llvm;
 
@@ -36,11 +38,11 @@
   /// Subtarget - Keep a pointer to the MipsSubtarget around so that we can
   /// make the right decision when generating code for different targets.
-  const MipsSubtarget *Subtarget;
   Module &M;
   const TargetMachine &TM;
   const TargetInstrInfo &TII;
   const TargetLowering &TLI;
+  const MipsSubtarget &Subtarget;
   MipsFunctionInfo *MFI;
 
   // Convenience variables to avoid some queries.
@@ -54,12 +56,12 @@
       : FastISel(funcInfo, libInfo),
         M(const_cast<Module &>(*funcInfo.Fn->getParent())),
         TM(funcInfo.MF->getTarget()), TII(*TM.getInstrInfo()),
-        TLI(*TM.getTargetLowering()) {
-    Subtarget = &TM.getSubtarget<MipsSubtarget>();
+        TLI(*TM.getTargetLowering()),
+        Subtarget(TM.getSubtarget<MipsSubtarget>()) {
     MFI = funcInfo.MF->getInfo<MipsFunctionInfo>();
     Context = &funcInfo.Fn->getContext();
-    TargetSupported = ((Subtarget->getRelocationModel() == Reloc::PIC_) &&
-                       (Subtarget->hasMips32r2() && (Subtarget->isABI_O32())));
+    TargetSupported = ((Subtarget.getRelocationModel() == Reloc::PIC_) &&
+                       (Subtarget.hasMips32r2() && (Subtarget.isABI_O32())));
   }
 
   bool TargetSelectInstruction(const Instruction *I) override;
 
@@ -70,6 +72,10 @@
 private:
   bool EmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                  unsigned Alignment = 0);
+
+  bool EmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
+                unsigned Alignment = 0, bool IsZExt = true);
+
+  bool SelectLoad(const Instruction *I);
   bool SelectRet(const Instruction *I);
   bool SelectStore(const Instruction *I);
 
@@ -80,6 +86,30 @@
   unsigned MaterializeGV(const GlobalValue *GV, MVT VT);
   unsigned MaterializeInt(const Constant *C, MVT VT);
   unsigned Materialize32BitInt(int64_t Imm, const TargetRegisterClass *RC);
+
+  unsigned FastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
+                             unsigned Op0, bool Op0IsKill, uint64_t imm1,
+                             uint64_t imm2, unsigned Op3, bool Op3IsKill) {
+    return 0;
+  }
+
+  MachineInstrBuilder EmitInst(unsigned Opc) {
+    return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
+  }
+
+  MachineInstrBuilder EmitInst(unsigned Opc, unsigned DstReg) {
+    return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DstReg);
+  }
+
+  MachineInstrBuilder EmitInstStore(unsigned Opc, unsigned SrcReg, unsigned MemReg, int64_t MemOffset) {
+    return EmitInst(Opc).addReg(SrcReg).addReg(MemReg).addImm(MemOffset);
+  }
+
+  MachineInstrBuilder EmitInstLoadMem(unsigned Opc, unsigned DstReg, unsigned MemReg, int64_t MemOffset) {
+    return EmitInst(Opc, DstReg).addReg(MemReg).addImm(MemOffset);
+  }
+
+#include "MipsGenFastISel.inc"
 };
 
 bool MipsFastISel::isTypeLegal(Type *Ty, MVT &VT) {
@@ -100,6 +130,8 @@
   // We will extend this in a later patch:
   // If this is a type than can be sign or zero-extended to a basic operation
   // go ahead and accept it now.
+  if (VT == MVT::i8 || VT == MVT::i16)
+    return true;
   return false;
 }
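Note: the EmitInst/EmitInstStore/EmitInstLoadMem members added to the class are
thin wrappers over BuildMI, and they are what lets the hunks below shrink to
one-liners. For example,

    EmitInstStore(Mips::SW, SrcReg, Addr.Base.Reg, Addr.Offset);

expands to exactly the pattern it replaces in EmitStore:

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::SW))
        .addReg(SrcReg)
        .addReg(Addr.Base.Reg)
        .addImm(Addr.Offset);

FastEmitInst_riir is a stub that always returns 0, i.e. "not handled", so the
code pulled in by MipsGenFastISel.inc cannot yet reach that path.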
 
@@ -136,17 +168,93 @@
   return 0;
 }
 
+bool MipsFastISel::EmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
+                            unsigned Alignment, bool IsZExt) {
+  //
+  // more cases will be handled here in following patches.
+  //
+  if (!IsZExt)
+    return false; // This is not needed yet.
+  unsigned Opc;
+  switch (VT.SimpleTy) {
+  case MVT::i32: {
+    ResultReg = createResultReg(&Mips::GPR32RegClass);
+    Opc = Mips::LW;
+    break;
+  }
+  case MVT::i16: {
+    ResultReg = createResultReg(&Mips::GPR32RegClass);
+    Opc = Mips::LHu;
+    break;
+  }
+  case MVT::i8: {
+    ResultReg = createResultReg(&Mips::GPR32RegClass);
+    Opc = Mips::LBu;
+    break;
+  }
+  case MVT::f32: {
+    ResultReg = createResultReg(&Mips::FGR32RegClass);
+    Opc = Mips::LWC1;
+    break;
+  }
+  case MVT::f64: {
+    ResultReg = createResultReg(&Mips::AFGR64RegClass);
+    Opc = Mips::LDC1;
+    break;
+  }
+  default:
+    return false;
+  }
+  EmitInstLoadMem(Opc, ResultReg, Addr.Base.Reg, Addr.Offset);
+  return true;
+}
+
 bool MipsFastISel::EmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                              unsigned Alignment) {
   //
   // more cases will be handled here in following patches.
   //
-  if (VT != MVT::i32)
+  unsigned Opc;
+  switch (VT.SimpleTy) {
+  case MVT::i8:
+    Opc = Mips::SB;
+    break;
+  case MVT::i16:
+    Opc = Mips::SH;
+    break;
+  case MVT::i32:
+    Opc = Mips::SW;
+    break;
+  case MVT::f32:
+    Opc = Mips::SWC1;
+    break;
+  case MVT::f64:
+    Opc = Mips::SDC1;
+    break;
+  default:
     return false;
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::SW))
-      .addReg(SrcReg)
-      .addReg(Addr.Base.Reg)
-      .addImm(Addr.Offset);
+  }
+  EmitInstStore(Opc, SrcReg, Addr.Base.Reg, Addr.Offset);
   return true;
 }
 
+bool MipsFastISel::SelectLoad(const Instruction *I) {
+  // Atomic loads need special handling.
+  if (cast<LoadInst>(I)->isAtomic())
+    return false;
+
+  // Verify we have a legal type before going any further.
+  MVT VT;
+  if (!isLoadTypeLegal(I->getType(), VT))
+    return false;
+
+  // See if we can handle this address.
+  Address Addr;
+  if (!ComputeAddress(I->getOperand(0), Addr))
+    return false;
+
+  unsigned ResultReg;
+  if (!EmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
+    return false;
+  UpdateValueMap(I, ResultReg);
+  return true;
+}
+
@@ -186,8 +294,7 @@
   if (Ret->getNumOperands() > 0) {
     return false;
   }
-  unsigned RetOpc = Mips::RetRA;
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(RetOpc));
+  EmitInst(Mips::RetRA);
   return true;
 }
 
@@ -197,6 +304,8 @@
   switch (I->getOpcode()) {
   default:
     break;
+  case Instruction::Load:
+    return SelectLoad(I);
   case Instruction::Store:
     return SelectStore(I);
   case Instruction::Ret:
@@ -207,6 +316,22 @@
 }
 
 unsigned MipsFastISel::MaterializeFP(const ConstantFP *CFP, MVT VT) {
+  int64_t Imm = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
+  if (VT == MVT::f32) {
+    const TargetRegisterClass *RC = &Mips::FGR32RegClass;
+    unsigned DestReg = createResultReg(RC);
+    unsigned TempReg = Materialize32BitInt(Imm, &Mips::GPR32RegClass);
+    EmitInst(Mips::MTC1, DestReg).addReg(TempReg);
+    return DestReg;
+  } else if (VT == MVT::f64) {
+    const TargetRegisterClass *RC = &Mips::AFGR64RegClass;
+    unsigned DestReg = createResultReg(RC);
+    unsigned TempReg1 = Materialize32BitInt(Imm >> 32, &Mips::GPR32RegClass);
+    unsigned TempReg2 =
+        Materialize32BitInt(Imm & 0xFFFFFFFF, &Mips::GPR32RegClass);
+    EmitInst(Mips::BuildPairF64, DestReg).addReg(TempReg2).addReg(TempReg1);
+    return DestReg;
+  }
   return 0;
 }
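Note: MaterializeFP assembles an f64 immediate from four 16-bit pieces: the
64-bit bit pattern is split into 32-bit halves, Materialize32BitInt turns each
half into a lui/ori pair, and BuildPairF64 joins the two GPRs into a 64-bit
FPR. A standalone sketch of that decomposition, using the same constant the d1
test below stores (illustrative host code, not part of the patch):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double D = 1.234567e+00;           // the constant stored to @de
      uint64_t Imm;
      std::memcpy(&Imm, &D, sizeof Imm); // bitcastToAPInt().getZExtValue()
      uint32_t Hi = uint32_t(Imm >> 32); // 0x3FF3C0C9
      uint32_t Lo = uint32_t(Imm);       // 0x539B8887
      // Each half becomes a lui/ori pair, matching the d1 CHECK lines:
      std::printf("hi: lui %u / ori %u\n", Hi >> 16, Hi & 0xFFFF); // 16371 49353
      std::printf("lo: lui %u / ori %u\n", Lo >> 16, Lo & 0xFFFF); // 21403 34951
    }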
 
@@ -221,8 +346,7 @@
   // TLS not supported at this time.
   if (IsThreadLocal)
     return 0;
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::LW), DestReg)
-      .addReg(MFI->getGlobalBaseReg())
+  EmitInst(Mips::LW, DestReg).addReg(MFI->getGlobalBaseReg())
       .addGlobalAddress(GV, 0, MipsII::MO_GOT);
   return DestReg;
 }
@@ -245,15 +369,10 @@
 
   if (isInt<16>(Imm)) {
     unsigned Opc = Mips::ADDiu;
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
-        .addReg(Mips::ZERO)
-        .addImm(Imm);
+    EmitInst(Opc, ResultReg).addReg(Mips::ZERO).addImm(Imm);
     return ResultReg;
   } else if (isUInt<16>(Imm)) {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::ORi),
-            ResultReg)
-        .addReg(Mips::ZERO)
-        .addImm(Imm);
+    EmitInst(Mips::ORi, ResultReg).addReg(Mips::ZERO).addImm(Imm);
     return ResultReg;
   }
   unsigned Lo = Imm & 0xFFFF;
@@ -261,16 +380,11 @@
   if (Lo) {
     // Both Lo and Hi have nonzero bits.
     unsigned TmpReg = createResultReg(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::LUi),
-            TmpReg).addImm(Hi);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::ORi),
-            ResultReg)
-        .addReg(TmpReg)
-        .addImm(Lo);
+    EmitInst(Mips::LUi, TmpReg).addImm(Hi);
+    EmitInst(Mips::ORi, ResultReg).addReg(TmpReg).addImm(Lo);
   } else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::LUi),
-            ResultReg).addImm(Hi);
+    EmitInst(Mips::LUi, ResultReg).addImm(Hi);
   }
   return ResultReg;
 }
Index: test/CodeGen/Mips/Fast-ISel/loadstore2.ll
===================================================================
--- /dev/null
+++ test/CodeGen/Mips/Fast-ISel/loadstore2.ll
@@ -0,0 +1,84 @@
+; ModuleID = 'loadstore2.c'
+target datalayout = "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64"
+target triple = "mips--linux-gnu"
+
+@c2 = common global i8 0, align 1
+@c1 = common global i8 0, align 1
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN:     < %s | FileCheck %s
+
+@s2 = common global i16 0, align 2
+@s1 = common global i16 0, align 2
+@i2 = common global i32 0, align 4
+@i1 = common global i32 0, align 4
+@f2 = common global float 0.000000e+00, align 4
+@f1 = common global float 0.000000e+00, align 4
+@d2 = common global double 0.000000e+00, align 8
+@d1 = common global double 0.000000e+00, align 8
+
+; Function Attrs: nounwind
+define void @cfoo() #0 {
+entry:
+  %0 = load i8* @c2, align 1
+  store i8 %0, i8* @c1, align 1
+; CHECK: .ent cfoo
+; CHECK: lbu ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: sb ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: .end cfoo
+
+  ret void
+}
+
+; Function Attrs: nounwind
+define void @sfoo() #0 {
+entry:
+  %0 = load i16* @s2, align 2
+  store i16 %0, i16* @s1, align 2
+; CHECK: .ent sfoo
+; CHECK: lhu ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: sh ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: .end sfoo
+  ret void
+}
+
+; Function Attrs: nounwind
+define void @ifoo() #0 {
+entry:
+  %0 = load i32* @i2, align 4
+  store i32 %0, i32* @i1, align 4
+; CHECK: .ent ifoo
+; CHECK: lw ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: sw ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: .end ifoo
+
+  ret void
+}
+
+; Function Attrs: nounwind
+define void @ffoo() #0 {
+entry:
+  %0 = load float* @f2, align 4
+  store float %0, float* @f1, align 4
+; CHECK: .ent ffoo
+; CHECK: lwc1 $f{{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: swc1 $f{{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: .end ffoo
+
+  ret void
+}
+
+; Function Attrs: nounwind
+define void @dfoo() #0 {
+entry:
+  %0 = load double* @d2, align 8
+  store double %0, double* @d1, align 8
+; CHECK: .ent dfoo
+; CHECK: ldc1 $f{{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: sdc1 $f{{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: .end dfoo
+  ret void
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
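Note: since TargetSupported restricts this FastISel to PIC, every global in
these tests is reached through the GOT: MaterializeGV emits
lw $reg, %got(sym)($gp), and the load or store itself then uses offset 0,
which is why every CHECK line above matches 0(${{[0-9]+}}). A function like
ifoo is therefore expected to lower to roughly this shape (register numbers
illustrative, $gp standing for whatever register holds the global base):

    lw   $1, %got(i2)($gp)
    lw   $2, 0($1)
    lw   $3, %got(i1)($gp)
    sw   $2, 0($3)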
Index: test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll
===================================================================
--- /dev/null
+++ test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll
@@ -0,0 +1,39 @@
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN:     < %s | FileCheck %s
+
+@f = common global float 0.000000e+00, align 4
+@de = common global double 0.000000e+00, align 8
+
+; Function Attrs: nounwind
+define void @f1() #0 {
+entry:
+  store float 0x3FFA76C8C0000000, float* @f, align 4
+  ret void
+; CHECK: .ent f1
+; CHECK: lui $[[REG1:[0-9]+]], 16339
+; CHECK: ori $[[REG2:[0-9]+]], $[[REG1]], 46662
+; CHECK: mtc1 $[[REG2]], $f[[REG3:[0-9]+]]
+; CHECK: lw $[[REG4:[0-9]+]], %got(f)(${{[0-9]+}})
+; CHECK: swc1 $f[[REG3]], 0($[[REG4]])
+; CHECK: .end f1
+}
+
+; Function Attrs: nounwind
+define void @d1() #0 {
+entry:
+  store double 1.234567e+00, double* @de, align 8
+; CHECK: .ent d1
+; CHECK: lui $[[REG1a:[0-9]+]], 16371
+; CHECK: ori $[[REG2a:[0-9]+]], $[[REG1a]], 49353
+; CHECK: lui $[[REG1b:[0-9]+]], 21403
+; CHECK: ori $[[REG2b:[0-9]+]], $[[REG1b]], 34951
+; CHECK: mtc1 $[[REG2b]], $f[[REG3b:[0-9]+]]
+; CHECK: mtc1 $[[REG2a]], $f[[REG3a:[0-9]+]]
+; CHECK: sdc1 $f[[REG3b]], 0(${{[0-9]+}})
+; CHECK: .end d1
+  ret void
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
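Note: either test can be run by hand with the llc invocation from its RUN
line, piping the output into FileCheck with the test file as the check source:

    llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort \
        -mcpu=mips32r2 < simplestorefp1.ll | FileCheck simplestorefp1.ll

-fast-isel-abort makes llc abort instead of silently falling back to
SelectionDAG when fast-isel cannot select an instruction, so a passing run
guarantees the checked sequences were produced by the paths added in this
patch.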