Index: lib/Target/Mips/MipsFastISel.cpp
===================================================================
--- lib/Target/Mips/MipsFastISel.cpp
+++ lib/Target/Mips/MipsFastISel.cpp
@@ -4,6 +4,7 @@
 #include "llvm/CodeGen/FunctionLoweringInfo.h"
 #include "llvm/CodeGen/FastISel.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
 #include "llvm/IR/GlobalAlias.h"
 #include "llvm/IR/GlobalVariable.h"
 #include "llvm/Target/TargetInstrInfo.h"
@@ -12,6 +13,7 @@
 #include "MipsISelLowering.h"
 #include "MipsMachineFunction.h"
 #include "MipsSubtarget.h"
+#include "MipsTargetMachine.h"
 
 using namespace llvm;
 
@@ -36,11 +38,11 @@
 
   /// Subtarget - Keep a pointer to the MipsSubtarget around so that we can
   /// make the right decision when generating code for different targets.
-  const MipsSubtarget *Subtarget;
   Module &M;
   const TargetMachine &TM;
   const TargetInstrInfo &TII;
   const TargetLowering &TLI;
+  const MipsSubtarget &Subtarget;
   MipsFunctionInfo *MFI;
 
   // Convenience variables to avoid some queries.
@@ -54,12 +56,12 @@
       : FastISel(funcInfo, libInfo),
         M(const_cast<Module &>(*funcInfo.Fn->getParent())),
         TM(funcInfo.MF->getTarget()), TII(*TM.getInstrInfo()),
-        TLI(*TM.getTargetLowering()) {
-    Subtarget = &TM.getSubtarget<MipsSubtarget>();
+        TLI(*TM.getTargetLowering()),
+        Subtarget(TM.getSubtarget<MipsSubtarget>()) {
     MFI = funcInfo.MF->getInfo<MipsFunctionInfo>();
     Context = &funcInfo.Fn->getContext();
-    TargetSupported = ((Subtarget->getRelocationModel() == Reloc::PIC_) &&
-                       (Subtarget->hasMips32r2() && (Subtarget->isABI_O32())));
+    TargetSupported = ((Subtarget.getRelocationModel() == Reloc::PIC_) &&
+                       (Subtarget.hasMips32r2() && (Subtarget.isABI_O32())));
   }
 
   bool TargetSelectInstruction(const Instruction *I) override;
@@ -70,8 +72,14 @@
 
 private:
   bool EmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                  unsigned Alignment = 0);
+
+  bool EmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
+                unsigned Alignment = 0, bool IsZExt = true);
+
+  bool SelectLoad(const Instruction *I);
   bool SelectRet(const Instruction *I);
   bool SelectStore(const Instruction *I);
+  bool SelectIntExt(const Instruction *I);
+  bool SelectTrunc(const Instruction *I);
 
   bool isTypeLegal(Type *Ty, MVT &VT);
   bool isLoadTypeLegal(Type *Ty, MVT &VT);
@@ -80,6 +88,36 @@
   unsigned MaterializeGV(const GlobalValue *GV, MVT VT);
   unsigned MaterializeInt(const Constant *C, MVT VT);
   unsigned Materialize32BitInt(int64_t Imm, const TargetRegisterClass *RC);
+
+  bool EmitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
+                   unsigned DestReg);
+
+  bool EmitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
+                   unsigned DestReg);
+
+  // TableGen does not emit this default variant, so provide the stub
+  // explicitly; returning 0 tells the generated FastISel code that the
+  // pattern was not handled.
+  unsigned FastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
+                             unsigned Op0, bool Op0IsKill, uint64_t imm1,
+                             uint64_t imm2, unsigned Op3, bool Op3IsKill) {
+    return 0;
+  }
+
+  MachineInstrBuilder EmitInst(unsigned Opc) {
+    return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));
+  }
+
+  MachineInstrBuilder EmitInst(unsigned Opc, unsigned DstReg) {
+    return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
+                   DstReg);
+  }
+
+  MachineInstrBuilder EmitInstStore(unsigned Opc, unsigned SrcReg,
                                    unsigned MemReg, int64_t MemOffset) {
+    return EmitInst(Opc).addReg(SrcReg).addReg(MemReg).addImm(MemOffset);
+  }
+
+  MachineInstrBuilder EmitInstLoadMem(unsigned Opc, unsigned DstReg,
+                                      unsigned MemReg, int64_t MemOffset) {
+    return EmitInst(Opc, DstReg).addReg(MemReg).addImm(MemOffset);
+  }
+
+#include "MipsGenFastISel.inc"
 };
 
 bool MipsFastISel::isTypeLegal(Type *Ty, MVT &VT) {
@@ -100,6 +138,8 @@
   // We will extend this in a later patch:
  // If this is a type that can be sign or zero-extended to a basic operation,
  // go ahead and accept it now.
+  if (VT == MVT::i8 || VT == MVT::i16)
+    return true;
   return false;
 }
@@ -136,17 +176,131 @@
   return 0;
 }
 
+bool MipsFastISel::EmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
+                            unsigned Alignment, bool IsZExt) {
+  //
+  // More cases will be handled here in following patches.
+  //
+  if (!IsZExt)
+    return false; // This is not needed yet.
+  unsigned Opc;
+  switch (VT.SimpleTy) {
+  case MVT::i32: {
+    ResultReg = createResultReg(&Mips::GPR32RegClass);
+    Opc = Mips::LW;
+    break;
+  }
+  case MVT::i16: {
+    ResultReg = createResultReg(&Mips::GPR32RegClass);
+    Opc = Mips::LH;
+    break;
+  }
+  case MVT::i8: {
+    ResultReg = createResultReg(&Mips::GPR32RegClass);
+    Opc = Mips::LB;
+    break;
+  }
+  case MVT::f32: {
+    ResultReg = createResultReg(&Mips::FGR32RegClass);
+    Opc = Mips::LWC1;
+    break;
+  }
+  case MVT::f64: {
+    ResultReg = createResultReg(&Mips::AFGR64RegClass);
+    Opc = Mips::LDC1;
+    break;
+  }
+  default:
+    return false;
+  }
+  EmitInstLoadMem(Opc, ResultReg, Addr.Base.Reg, Addr.Offset);
+  return true;
+}
+
 bool MipsFastISel::EmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                              unsigned Alignment) {
   //
   // More cases will be handled here in following patches.
   //
-  if (VT != MVT::i32)
+  unsigned Opc;
+  switch (VT.SimpleTy) {
+  case MVT::i8:
+    Opc = Mips::SB;
+    break;
+  case MVT::i16:
+    Opc = Mips::SH;
+    break;
+  case MVT::i32:
+    Opc = Mips::SW;
+    break;
+  case MVT::f32:
+    Opc = Mips::SWC1;
+    break;
+  case MVT::f64:
+    Opc = Mips::SDC1;
+    break;
+  default:
+    return false;
+  }
+  EmitInstStore(Opc, SrcReg, Addr.Base.Reg, Addr.Offset);
+  return true;
+}
+
+// Attempt to emit an integer extend of SrcReg into DestReg. Both
+// signed and zero extensions are supported. Return false if we
+// can't handle it.
+bool MipsFastISel::EmitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
+                               unsigned DestReg) {
+  if ((DestVT != MVT::i32) && (DestVT != MVT::i16))
+    return false;
+  switch (SrcVT.SimpleTy) {
+  default:
     return false;
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::SW))
-      .addReg(SrcReg)
-      .addReg(Addr.Base.Reg)
-      .addImm(Addr.Offset);
+  case MVT::i8:
+    EmitInst(Mips::SEB, DestReg).addReg(SrcReg);
+    break;
+  case MVT::i16:
+    EmitInst(Mips::SEH, DestReg).addReg(SrcReg);
+    break;
+  }
+  return true;
+}
+
+bool MipsFastISel::EmitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
+                               unsigned DestReg) {
+  switch (SrcVT.SimpleTy) {
+  default:
+    return false;
+  case MVT::i1:
+    EmitInst(Mips::ANDi, DestReg).addReg(SrcReg).addImm(1);
+    break;
+  case MVT::i8:
+    EmitInst(Mips::ANDi, DestReg).addReg(SrcReg).addImm(0xff);
+    break;
+  case MVT::i16:
+    EmitInst(Mips::ANDi, DestReg).addReg(SrcReg).addImm(0xffff);
+    break;
+  }
+  return true;
+}
+
+bool MipsFastISel::SelectLoad(const Instruction *I) {
+  // Atomic loads need special handling.
+  if (cast<LoadInst>(I)->isAtomic())
+    return false;
+
+  // Verify we have a legal type before going any further.
+  MVT VT;
+  if (!isLoadTypeLegal(I->getType(), VT))
+    return false;
+
+  // See if we can handle this address.
+  Address Addr;
+  if (!ComputeAddress(I->getOperand(0), Addr))
+    return false;
+
+  unsigned ResultReg;
+  if (!EmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
+    return false;
+  UpdateValueMap(I, ResultReg);
   return true;
 }
@@ -186,8 +340,64 @@
   if (Ret->getNumOperands() > 0) {
     return false;
   }
-  unsigned RetOpc = Mips::RetRA;
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(RetOpc));
+  EmitInst(Mips::RetRA);
+  return true;
+}
+
+bool MipsFastISel::SelectIntExt(const Instruction *I) {
+  // In general, integer casts don't involve legal types; this code handles
+  // promotable integers.
+  Type *DestTy = I->getType();
+  Value *Src = I->getOperand(0);
+  Type *SrcTy = Src->getType();
+
+  bool isZExt = isa<ZExtInst>(I);
+  unsigned SrcReg = getRegForValue(Src);
+  if (!SrcReg)
+    return false;
+
+  EVT SrcEVT = TLI.getValueType(SrcTy, true);
+  EVT DestEVT = TLI.getValueType(DestTy, true);
+  if (!SrcEVT.isSimple())
+    return false;
+  if (!DestEVT.isSimple())
+    return false;
+
+  MVT SrcVT = SrcEVT.getSimpleVT();
+  MVT DestVT = DestEVT.getSimpleVT();
+  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
+
+  if (isZExt) {
+    if (!EmitIntZExt(SrcVT, SrcReg, DestVT, ResultReg))
+      return false;
+  } else {
+    if (!EmitIntSExt(SrcVT, SrcReg, DestVT, ResultReg))
+      return false;
+  }
+  UpdateValueMap(I, ResultReg);
+  return true;
+}
+
+bool MipsFastISel::SelectTrunc(const Instruction *I) {
+  // The high bits for a type smaller than the register size are assumed to be
+  // undefined.
+  Value *Op = I->getOperand(0);
+
+  EVT SrcVT = TLI.getValueType(Op->getType(), true);
+  EVT DestVT = TLI.getValueType(I->getType(), true);
+
+  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
+    return false;
+  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
+    return false;
+
+  unsigned SrcReg = getRegForValue(Op);
+  if (!SrcReg)
+    return false;
+
+  // Because the high bits are undefined, a truncate doesn't generate
+  // any code.
+  UpdateValueMap(I, SrcReg);
   return true;
 }
@@ -197,16 +407,39 @@
 
   switch (I->getOpcode()) {
   default:
     break;
+  case Instruction::Load:
+    return SelectLoad(I);
   case Instruction::Store:
     return SelectStore(I);
   case Instruction::Ret:
     return SelectRet(I);
+  case Instruction::Trunc:
+    return SelectTrunc(I);
+  case Instruction::ZExt:
+  case Instruction::SExt:
+    return SelectIntExt(I);
   }
   return false;
 }
 }
 
 unsigned MipsFastISel::MaterializeFP(const ConstantFP *CFP, MVT VT) {
+  int64_t Imm = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
+  if (VT == MVT::f32) {
+    const TargetRegisterClass *RC = &Mips::FGR32RegClass;
+    unsigned DestReg = createResultReg(RC);
+    unsigned TempReg = Materialize32BitInt(Imm, &Mips::GPR32RegClass);
+    EmitInst(Mips::MTC1, DestReg).addReg(TempReg);
+    return DestReg;
+  } else if (VT == MVT::f64) {
+    const TargetRegisterClass *RC = &Mips::AFGR64RegClass;
+    unsigned DestReg = createResultReg(RC);
+    unsigned TempReg1 = Materialize32BitInt(Imm >> 32, &Mips::GPR32RegClass);
+    unsigned TempReg2 =
+        Materialize32BitInt(Imm & 0xFFFFFFFF, &Mips::GPR32RegClass);
+    EmitInst(Mips::BuildPairF64, DestReg).addReg(TempReg2).addReg(TempReg1);
+    return DestReg;
+  }
   return 0;
 }
 
@@ -221,8 +454,7 @@
   // TLS not supported at this time.
   if (IsThreadLocal)
     return 0;
-  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::LW), DestReg)
-      .addReg(MFI->getGlobalBaseReg())
+  EmitInst(Mips::LW, DestReg).addReg(MFI->getGlobalBaseReg())
       .addGlobalAddress(GV, 0, MipsII::MO_GOT);
   return DestReg;
 }
@@ -245,15 +477,10 @@
 
   if (isInt<16>(Imm)) {
     unsigned Opc = Mips::ADDiu;
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
-        .addReg(Mips::ZERO)
-        .addImm(Imm);
+    EmitInst(Opc, ResultReg).addReg(Mips::ZERO).addImm(Imm);
     return ResultReg;
   } else if (isUInt<16>(Imm)) {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::ORi),
-            ResultReg)
-        .addReg(Mips::ZERO)
-        .addImm(Imm);
+    EmitInst(Mips::ORi, ResultReg).addReg(Mips::ZERO).addImm(Imm);
     return ResultReg;
   }
   unsigned Lo = Imm & 0xFFFF;
@@ -261,16 +488,11 @@
   if (Lo) {
     // Both Lo and Hi have nonzero bits.
     unsigned TmpReg = createResultReg(RC);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::LUi),
-            TmpReg).addImm(Hi);
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::ORi),
-            ResultReg)
-        .addReg(TmpReg)
-        .addImm(Lo);
+    EmitInst(Mips::LUi, TmpReg).addImm(Hi);
+    EmitInst(Mips::ORi, ResultReg).addReg(TmpReg).addImm(Lo);
   } else {
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::LUi),
-            ResultReg).addImm(Hi);
+    EmitInst(Mips::LUi, ResultReg).addImm(Hi);
   }
   return ResultReg;
 }
Index: test/CodeGen/Mips/Fast-ISel/loadstore2.ll
===================================================================
--- /dev/null
+++ test/CodeGen/Mips/Fast-ISel/loadstore2.ll
@@ -0,0 +1,84 @@
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN:     < %s | FileCheck %s
+
+; ModuleID = 'loadstore2.c'
+target datalayout = "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64"
+target triple = "mips--linux-gnu"
+
+@c2 = common global i8 0, align 1
+@c1 = common global i8 0, align 1
+@s2 = common global i16 0, align 2
+@s1 = common global i16 0, align 2
+@i2 = common global i32 0, align 4
+@i1 = common global i32 0, align 4
+@f2 = common global float 0.000000e+00, align 4
+@f1 = common global float 0.000000e+00, align 4
+@d2 = common global double 0.000000e+00, align 8
+@d1 = common global double 0.000000e+00, align 8
+
+; Function Attrs: nounwind
+define void @cfoo() #0 {
+entry:
+  %0 = load i8* @c2, align 1
+  store i8 %0, i8* @c1, align 1
+; CHECK: .ent cfoo
+; CHECK: lb ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: sb ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: .end cfoo
+  ret void
+}
+
+; Function Attrs: nounwind
+define void @sfoo() #0 {
+entry:
+  %0 = load i16* @s2, align 2
+  store i16 %0, i16* @s1, align 2
+; CHECK: .ent sfoo
+; CHECK: lh ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: sh ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: .end sfoo
+  ret void
+}
+
+; Function Attrs: nounwind
+define void @ifoo() #0 {
+entry:
+  %0 = load i32* @i2, align 4
+  store i32 %0, i32* @i1, align 4
+; CHECK: .ent ifoo
+; CHECK: lw ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: sw ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: .end ifoo
+  ret void
+}
+
+; Function Attrs: nounwind
+define void @ffoo() #0 {
+entry:
+  %0 = load float* @f2, align 4
+  store float %0, float* @f1, align 4
+; CHECK: .ent ffoo
+; CHECK: lwc1 $f{{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: swc1 $f{{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: .end ffoo
+  ret void
+}
+
+; Function Attrs: nounwind
+define void @dfoo() #0 {
+entry:
+  %0 = load double* @d2, align 8
+  store double %0, double* @d1, align 8
+; CHECK: .ent dfoo
+; CHECK: ldc1 $f{{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: sdc1 $f{{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: .end dfoo
+  ret void
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
Index: test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll
===================================================================
--- /dev/null
+++ test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll
@@ -0,0 +1,166 @@
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN:     < %s | FileCheck %s
+
+@b2 = global i8 0, align 1
+@b1 = global i8 1, align 1
+@uc1 = global i8 0, align 1
+@uc2 = global i8 -1, align 1
+@sc1 = global i8 -128, align 1
+@sc2 = global i8 127, align 1
+@ss1 = global i16 -32768, align 2
+@ss2 = global i16 32767, align 2
+@us1 = global i16 0, align 2
+@us2 = global i16 -1, align 2
+@ssi = global i16 0, align 2
+@ssj = global i16 0, align 2
+@i = global i32 0, align 4
+@j = global i32 0, align 4
+@.str = private unnamed_addr constant [4 x i8] c"%i\0A\00", align 1
+@.str1 = private unnamed_addr constant [7 x i8] c"%i %i\0A\00", align 1
+
+; Function Attrs: nounwind
+define void @_Z3b_iv() #0 {
+entry:
+; CHECK: .ent _Z3b_iv
+  %0 = load i8* @b1, align 1
+  %tobool = trunc i8 %0 to i1
+  %frombool = zext i1 %tobool to i8
+  store i8 %frombool, i8* @b2, align 1
+  %1 = load i8* @b2, align 1
+  %tobool1 = trunc i8 %1 to i1
+  %conv = zext i1 %tobool1 to i32
+  store i32 %conv, i32* @i, align 4
+; CHECK: lb ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: andi ${{[0-9]+}}, ${{[0-9]+}}, 1
+; CHECK: sb ${{[0-9]+}}, 0(${{[0-9]+}})
+  ret void
+; CHECK: .end _Z3b_iv
+}
+
+; Function Attrs: nounwind
+define void @_Z4uc_iv() #0 {
+entry:
+; CHECK: .ent _Z4uc_iv
+  %0 = load i8* @uc1, align 1
+  %conv = zext i8 %0 to i32
+  store i32 %conv, i32* @i, align 4
+  %1 = load i8* @uc2, align 1
+  %conv1 = zext i8 %1 to i32
+; CHECK: lb ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: andi ${{[0-9]+}}, ${{[0-9]+}}, 255
+  store i32 %conv1, i32* @j, align 4
+  ret void
+; CHECK: .end _Z4uc_iv
+}
+
+; Function Attrs: nounwind
+define void @_Z4sc_iv() #0 {
+entry:
+; CHECK: .ent _Z4sc_iv
+  %0 = load i8* @sc1, align 1
+  %conv = sext i8 %0 to i32
+  store i32 %conv, i32* @i, align 4
+  %1 = load i8* @sc2, align 1
+  %conv1 = sext i8 %1 to i32
+  store i32 %conv1, i32* @j, align 4
+; CHECK: lb ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: seb ${{[0-9]+}}, ${{[0-9]+}}
+  ret void
+; CHECK: .end _Z4sc_iv
+}
+
+; Function Attrs: nounwind
+define void @_Z4us_iv() #0 {
+entry:
+; CHECK: .ent _Z4us_iv
+  %0 = load i16* @us1, align 2
+  %conv = zext i16 %0 to i32
+  store i32 %conv, i32* @i, align 4
+  %1 = load i16* @us2, align 2
+  %conv1 = zext i16 %1 to i32
+  store i32 %conv1, i32* @j, align 4
+  ret void
+; CHECK: lh ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: andi ${{[0-9]+}}, ${{[0-9]+}}, 65535
+; CHECK: .end _Z4us_iv
+}
+
+; Function Attrs: nounwind
+define void @_Z4ss_iv() #0 {
+entry:
+; CHECK: .ent _Z4ss_iv
+  %0 = load i16* @ss1, align 2
+  %conv = sext i16 %0 to i32
+  store i32 %conv, i32* @i, align 4
+  %1 = load i16* @ss2, align 2
+  %conv1 = sext i16 %1 to i32
+  store i32 %conv1, i32* @j, align 4
+; CHECK: lh ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: seh ${{[0-9]+}}, ${{[0-9]+}}
+  ret void
+; CHECK: .end _Z4ss_iv
+}
+
+; Function Attrs: nounwind
+define void @_Z4b_ssv() #0 {
+entry:
+; CHECK: .ent _Z4b_ssv
+  %0 = load i8* @b2, align 1
+  %tobool = trunc i8 %0 to i1
+  %conv = zext i1 %tobool to i16
+  store i16 %conv, i16* @ssi, align 2
+  ret void
+; CHECK: lb ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: andi ${{[0-9]+}}, ${{[0-9]+}}, 1
+; CHECK: .end _Z4b_ssv
+}
+
+; Function Attrs: nounwind
+define void @_Z5uc_ssv() #0 {
+entry:
+; CHECK: .ent _Z5uc_ssv
+  %0 = load i8* @uc1, align 1
+  %conv = zext i8 %0 to i16
+  store i16 %conv, i16* @ssi, align 2
+  %1 = load i8* @uc2, align 1
+  %conv1 = zext i8 %1 to i16
+; CHECK: lb ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: andi ${{[0-9]+}}, ${{[0-9]+}}, 255
+  store i16 %conv1, i16* @ssj, align 2
+  ret void
+; CHECK: .end _Z5uc_ssv
+}
+
+; Function Attrs: nounwind
+define void @_Z5sc_ssv() #0 {
+entry:
+; CHECK: .ent _Z5sc_ssv
+  %0 = load i8* @sc1, align 1
+  %conv = sext i8 %0 to i16
+  store i16 %conv, i16* @ssi, align 2
+  %1 = load i8* @sc2, align 1
+  %conv1 = sext i8 %1 to i16
+  store i16 %conv1, i16* @ssj, align 2
+; CHECK: lb ${{[0-9]+}}, 0(${{[0-9]+}})
+; CHECK: seb ${{[0-9]+}}, ${{[0-9]+}}
+  ret void
+; CHECK: .end _Z5sc_ssv
+}
+
+declare i32 @printf(i8*, ...) #1
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
Index: test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll
===================================================================
--- /dev/null
+++ test/CodeGen/Mips/Fast-ISel/simplestorefp1.ll
@@ -0,0 +1,39 @@
+; RUN: llc -march=mipsel -relocation-model=pic -O0 -mips-fast-isel -fast-isel-abort -mcpu=mips32r2 \
+; RUN:     < %s | FileCheck %s
+
+@f = common global float 0.000000e+00, align 4
+@de = common global double 0.000000e+00, align 8
+
+; Function Attrs: nounwind
+define void @f1() #0 {
+entry:
+  store float 0x3FFA76C8C0000000, float* @f, align 4
+  ret void
+; CHECK: .ent f1
+; CHECK: lui $[[REG1:[0-9]+]], 16339
+; CHECK: ori $[[REG2:[0-9]+]], $[[REG1]], 46662
+; CHECK: mtc1 $[[REG2]], $f[[REG3:[0-9]+]]
+; CHECK: lw $[[REG4:[0-9]+]], %got(f)(${{[0-9]+}})
+; CHECK: swc1 $f[[REG3]], 0($[[REG4]])
+; CHECK: .end f1
+}
+
+; Function Attrs: nounwind
+define void @d1() #0 {
+entry:
+  store double 1.234567e+00, double* @de, align 8
+; CHECK: .ent d1
+; CHECK: lui $[[REG1a:[0-9]+]], 16371
+; CHECK: ori $[[REG2a:[0-9]+]], $[[REG1a]], 49353
+; CHECK: lui $[[REG1b:[0-9]+]], 21403
+; CHECK: ori $[[REG2b:[0-9]+]], $[[REG1b]], 34951
+; CHECK: mtc1 $[[REG2b]], $f[[REG3b:[0-9]+]]
+; CHECK: mtc1 $[[REG2a]], $f[[REG3a:[0-9]+]]
+; CHECK: sdc1 $f[[REG3b]], 0(${{[0-9]+}})
+; CHECK: .end d1
+  ret void
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }