diff --git a/llvm/lib/Target/Mips/MipsCallLowering.cpp b/llvm/lib/Target/Mips/MipsCallLowering.cpp
index cb3c5ef752dd..e82f62260b3f 100644
--- a/llvm/lib/Target/Mips/MipsCallLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsCallLowering.cpp
@@ -1,439 +1,441 @@
//===- MipsCallLowering.cpp -------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "MipsCallLowering.h"
#include "MipsCCState.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;

MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
    : CallLowering(&TLI) {}

bool MipsCallLowering::MipsHandler::assign(const CCValAssign &VA,
                                           unsigned vreg) {
  if (VA.isRegLoc()) {
    assignValueToReg(vreg, VA.getLocReg());
  } else if (VA.isMemLoc()) {
    unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
    unsigned Offset = VA.getLocMemOffset();
    MachinePointerInfo MPO;
    unsigned StackAddr = getStackAddress(Size, Offset, MPO);
    assignValueToAddress(vreg, StackAddr, Size, MPO);
  } else {
    return false;
  }
  return true;
}

namespace {
class IncomingValueHandler : public MipsCallLowering::MipsHandler {
public:
  IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : MipsHandler(MIRBuilder, MRI) {}

  bool handle(ArrayRef<CCValAssign> ArgLocs,
              ArrayRef<CallLowering::ArgInfo> Args);

private:
  void assignValueToReg(unsigned ValVReg, unsigned PhysReg) override;

  unsigned getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override;

  void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
                            MachinePointerInfo &MPO) override;

  virtual void markPhysRegUsed(unsigned PhysReg) {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }

  void buildLoad(unsigned Val, unsigned Addr, uint64_t Size, unsigned Alignment,
                 MachinePointerInfo &MPO) {
    MachineMemOperand *MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad, Size, Alignment);
    MIRBuilder.buildLoad(Val, Addr, *MMO);
  }
};

class CallReturnHandler : public IncomingValueHandler {
public:
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder &MIB)
      : IncomingValueHandler(MIRBuilder, MRI), MIB(MIB) {}

private:
  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder &MIB;
};

} // end anonymous namespace

void IncomingValueHandler::assignValueToReg(unsigned ValVReg,
                                            unsigned PhysReg) {
  MIRBuilder.buildCopy(ValVReg, PhysReg);
  markPhysRegUsed(PhysReg);
}

unsigned IncomingValueHandler::getStackAddress(uint64_t Size, int64_t Offset,
                                               MachinePointerInfo &MPO) {
  MachineFrameInfo &MFI = MIRBuilder.getMF().getFrameInfo();

  int FI = MFI.CreateFixedObject(Size, Offset, true);
  MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

  unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32));
  MIRBuilder.buildFrameIndex(AddrReg, FI);

  return AddrReg;
}

void IncomingValueHandler::assignValueToAddress(unsigned ValVReg, unsigned Addr,
                                                uint64_t Size,
                                                MachinePointerInfo &MPO) {
  // If the value is not extended, a simple load will suffice.
  buildLoad(ValVReg, Addr, Size, /* Alignment */ 0, MPO);
}

bool IncomingValueHandler::handle(ArrayRef<CCValAssign> ArgLocs,
                                  ArrayRef<CallLowering::ArgInfo> Args) {
  for (unsigned i = 0, ArgsSize = Args.size(); i < ArgsSize; ++i) {
    if (!assign(ArgLocs[i], Args[i].Reg))
      return false;
  }
  return true;
}

namespace {
class OutgoingValueHandler : public MipsCallLowering::MipsHandler {
public:
  OutgoingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                       MachineInstrBuilder &MIB)
      : MipsHandler(MIRBuilder, MRI), MIB(MIB) {}

  bool handle(ArrayRef<CCValAssign> ArgLocs,
              ArrayRef<CallLowering::ArgInfo> Args);

private:
  void assignValueToReg(unsigned ValVReg, unsigned PhysReg) override;

  unsigned getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override;

  void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
                            MachinePointerInfo &MPO) override;

  MachineInstrBuilder &MIB;
};
} // end anonymous namespace

void OutgoingValueHandler::assignValueToReg(unsigned ValVReg,
                                            unsigned PhysReg) {
  MIRBuilder.buildCopy(PhysReg, ValVReg);
  MIB.addUse(PhysReg, RegState::Implicit);
}

unsigned OutgoingValueHandler::getStackAddress(uint64_t Size, int64_t Offset,
                                               MachinePointerInfo &MPO) {
  LLT p0 = LLT::pointer(0, 32);
  LLT s32 = LLT::scalar(32);
  unsigned SPReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildCopy(SPReg, Mips::SP);

  unsigned OffsetReg = MRI.createGenericVirtualRegister(s32);
  MIRBuilder.buildConstant(OffsetReg, Offset);

  unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
  MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);

  MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
  return AddrReg;
}

void OutgoingValueHandler::assignValueToAddress(unsigned ValVReg, unsigned Addr,
                                                uint64_t Size,
                                                MachinePointerInfo &MPO) {
  MachineMemOperand *MMO = MIRBuilder.getMF().getMachineMemOperand(
      MPO, MachineMemOperand::MOStore, Size, /* Alignment */ 0);
  MIRBuilder.buildStore(ValVReg, Addr, *MMO);
}

bool OutgoingValueHandler::handle(ArrayRef<CCValAssign> ArgLocs,
                                  ArrayRef<CallLowering::ArgInfo> Args) {
  for (unsigned i = 0; i < Args.size(); ++i) {
    if (!assign(ArgLocs[i], Args[i].Reg))
      return false;
  }
  return true;
}

static bool isSupportedType(Type *T) {
  if (T->isIntegerTy() && T->getScalarSizeInBits() == 32)
    return true;
+  if (T->isPointerTy())
+    return true;
  return false;
}

bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                   const Value *Val, unsigned VReg) const {

  MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);

  if (Val != nullptr) {
    if (!isSupportedType(Val->getType()))
      return false;

    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    const DataLayout &DL = MF.getDataLayout();
    const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

    SmallVector<ArgInfo, 8> RetInfos;
    SmallVector<unsigned, 8> OrigArgIndices;

    ArgInfo ArgRetInfo(VReg, Val->getType());
    setArgFlags(ArgRetInfo, AttributeList::ReturnIndex, DL, F);
    splitToValueTypes(ArgRetInfo, 0, RetInfos, OrigArgIndices);

    SmallVector<ISD::OutputArg, 8> Outs;
    subTargetRegTypeForCallingConv(
        MIRBuilder, RetInfos, OrigArgIndices,
        [&](ISD::ArgFlagsTy flags, EVT vt, EVT argvt, bool used,
            unsigned origIdx, unsigned partOffs) {
          Outs.emplace_back(flags, vt, argvt, used, origIdx, partOffs);
        });

    SmallVector<CCValAssign, 8> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());
    CCInfo.AnalyzeReturn(Outs, TLI.CCAssignFnForReturn());

    OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
    if (!RetHandler.handle(ArgLocs, RetInfos)) {
      return false;
    }
  }
  MIRBuilder.insertInstr(Ret);
  return true;
}

bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                            const Function &F,
                                            ArrayRef<unsigned> VRegs) const {

  // Quick exit if there aren't any args.
  if (F.arg_empty())
    return true;

  if (F.isVarArg()) {
    return false;
  }

  for (auto &Arg : F.args()) {
    if (!isSupportedType(Arg.getType()))
      return false;
  }

  MachineFunction &MF = MIRBuilder.getMF();
  const DataLayout &DL = MF.getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    ArgInfo AInfo(VRegs[i], Arg.getType());
    setArgFlags(AInfo, i + AttributeList::FirstArgIndex, DL, F);
    splitToValueTypes(AInfo, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::InputArg, 8> Ins;
  subTargetRegTypeForCallingConv(
      MIRBuilder, ArgInfos, OrigArgIndices,
      [&](ISD::ArgFlagsTy flags, EVT vt, EVT argvt, bool used,
          unsigned origIdx, unsigned partOffs) {
        Ins.emplace_back(flags, vt, argvt, used, origIdx, partOffs);
      });

  SmallVector<CCValAssign, 8> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();
  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
                       1);
  CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall());

  IncomingValueHandler Handler(MIRBuilder, MF.getRegInfo());
  if (!Handler.handle(ArgLocs, ArgInfos))
    return false;

  return true;
}

bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                 CallingConv::ID CallConv,
                                 const MachineOperand &Callee,
                                 const ArgInfo &OrigRet,
                                 ArrayRef<ArgInfo> OrigArgs) const {

  if (CallConv != CallingConv::C)
    return false;

  for (auto &Arg : OrigArgs) {
    if (!isSupportedType(Arg.Ty))
      return false;
    if (Arg.Flags.isByVal() || Arg.Flags.isSRet())
      return false;
  }
  if (OrigRet.Reg && !isSupportedType(OrigRet.Ty))
    return false;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();
  const MipsTargetMachine &TM =
      static_cast<const MipsTargetMachine &>(MF.getTarget());
  const MipsABIInfo &ABI = TM.getABI();

  MachineInstrBuilder CallSeqStart =
      MIRBuilder.buildInstr(Mips::ADJCALLSTACKDOWN);

  // FIXME: Add support for pic calling sequences, long call sequences for O32,
  // N32 and N64. First handle the case when Callee.isReg().
  if (Callee.isReg())
    return false;

  MachineInstrBuilder MIB = MIRBuilder.buildInstrNoInsert(Mips::JAL);
  MIB.addDef(Mips::SP, RegState::Implicit);
  MIB.add(Callee);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));

  TargetLowering::ArgListTy FuncOrigArgs;
  FuncOrigArgs.reserve(OrigArgs.size());

  SmallVector<ArgInfo, 8> ArgInfos;
  SmallVector<unsigned, 8> OrigArgIndices;
  unsigned i = 0;
  for (auto &Arg : OrigArgs) {

    TargetLowering::ArgListEntry Entry;
    Entry.Ty = Arg.Ty;
    FuncOrigArgs.push_back(Entry);

    splitToValueTypes(Arg, i, ArgInfos, OrigArgIndices);
    ++i;
  }

  SmallVector<ISD::OutputArg, 8> Outs;
  subTargetRegTypeForCallingConv(
      MIRBuilder, ArgInfos, OrigArgIndices,
      [&](ISD::ArgFlagsTy flags, EVT vt, EVT argvt, bool used,
          unsigned origIdx, unsigned partOffs) {
        Outs.emplace_back(flags, vt, argvt, used, origIdx, partOffs);
      });

  SmallVector<CCValAssign, 8> ArgLocs;
  MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                     F.getContext());

  CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1);
  const char *Call = Callee.isSymbol() ? Callee.getSymbolName() : nullptr;
  CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call);

  OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);
  if (!RetHandler.handle(ArgLocs, ArgInfos)) {
    return false;
  }

  unsigned NextStackOffset = CCInfo.getNextStackOffset();
  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
  unsigned StackAlignment = TFL->getStackAlignment();
  NextStackOffset = alignTo(NextStackOffset, StackAlignment);
  CallSeqStart.addImm(NextStackOffset).addImm(0);

  MIRBuilder.insertInstr(MIB);

  if (OrigRet.Reg) {

    ArgInfos.clear();
    SmallVector<unsigned, 8> OrigRetIndices;

    splitToValueTypes(OrigRet, 0, ArgInfos, OrigRetIndices);

    SmallVector<ISD::InputArg, 8> Ins;
    subTargetRegTypeForCallingConv(
        MIRBuilder, ArgInfos, OrigRetIndices,
        [&](ISD::ArgFlagsTy flags, EVT vt, EVT argvt, bool used,
            unsigned origIdx, unsigned partOffs) {
          Ins.emplace_back(flags, vt, argvt, used, origIdx, partOffs);
        });

    SmallVector<CCValAssign, 8> ArgLocs;
    MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs,
                       F.getContext());

    CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), OrigRet.Ty, Call);

    CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB);
    if (!Handler.handle(ArgLocs, ArgInfos))
      return false;
  }

  MIRBuilder.buildInstr(Mips::ADJCALLSTACKUP).addImm(NextStackOffset).addImm(0);

  return true;
}

void MipsCallLowering::subTargetRegTypeForCallingConv(
    MachineIRBuilder &MIRBuilder, ArrayRef<ArgInfo> Args,
    ArrayRef<unsigned> OrigArgIndices, const FunTy &PushBack) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>();

  unsigned ArgNo = 0;
  for (auto &Arg : Args) {

    EVT VT = TLI.getValueType(DL, Arg.Ty);
    MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(), VT);

    ISD::ArgFlagsTy Flags = Arg.Flags;
    Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL));

    PushBack(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo], 0);

    ++ArgNo;
  }
}

void MipsCallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, unsigned OriginalIndex,
    SmallVectorImpl<ArgInfo> &SplitArgs,
    SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const {

  // TODO : perform structure and array split. For now we only deal with
  // types that pass isSupportedType check.
  SplitArgs.push_back(OrigArg);
  SplitArgsOrigIndices.push_back(OriginalIndex);
}
diff --git a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
index 3e2fcd6122aa..da6f9dabdaaf 100644
--- a/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsLegalizerInfo.cpp
@@ -1,41 +1,41 @@
//===- MipsLegalizerInfo.cpp ------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the Machinelegalizer class for Mips.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MipsLegalizerInfo.h"
#include "MipsTargetMachine.h"

using namespace llvm;

MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
  using namespace TargetOpcode;

  const LLT s32 = LLT::scalar(32);
  const LLT p0 = LLT::pointer(0, 32);

  getActionDefinitionsBuilder(G_ADD).legalFor({s32});

  getActionDefinitionsBuilder({G_LOAD, G_STORE})
-      .legalFor({{s32, p0}});
+      .legalForCartesianProduct({p0, s32}, {p0});

  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({s32});

  getActionDefinitionsBuilder(G_GEP)
      .legalFor({{p0, s32}});

  getActionDefinitionsBuilder(G_FRAME_INDEX)
      .legalFor({p0});

  computeTables();
  verify(*ST.getInstrInfo());
}
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir
new file mode 100644
index 000000000000..9f469ea8913a
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir
@@ -0,0 +1,81 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
+--- |
+
+  define void @ptr_arg_in_regs(i32* %p) {entry: ret void}
+  define void @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i32* %p) {entry: ret void}
+  define void @ret_ptr(i8* %p) {entry: ret void}
+
+...
+---
+name: ptr_arg_in_regs
+alignment: 2
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+  bb.1.entry:
+    liveins: $a0
+
+    ; MIPS32-LABEL: name: ptr_arg_in_regs
+    ; MIPS32: liveins: $a0
+    ; MIPS32: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; MIPS32: [[LW:%[0-9]+]]:gpr32 = LW [[COPY]], 0 :: (load 4 from %ir.p)
+    ; MIPS32: $v0 = COPY [[LW]]
+    ; MIPS32: RetRA implicit $v0
+    %0:gprb(p0) = COPY $a0
+    %1:gprb(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p)
+    $v0 = COPY %1(s32)
+    RetRA implicit $v0
+
+...
+---
+name: ptr_arg_on_stack
+alignment: 2
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+fixedStack:
+  - { id: 0, offset: 16, size: 4, alignment: 8, stack-id: 0, isImmutable: true }
+body: |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    ; MIPS32-LABEL: name: ptr_arg_on_stack
+    ; MIPS32: liveins: $a0, $a1, $a2, $a3
+    ; MIPS32: [[ADDiu:%[0-9]+]]:gpr32 = ADDiu %fixed-stack.0, 0
+    ; MIPS32: [[LW:%[0-9]+]]:gpr32 = LW [[ADDiu]], 0 :: (load 4 from %fixed-stack.0, align 0)
+    ; MIPS32: [[LW1:%[0-9]+]]:gpr32 = LW [[LW]], 0 :: (load 4 from %ir.p)
+    ; MIPS32: $v0 = COPY [[LW1]]
+    ; MIPS32: RetRA implicit $v0
+    %0:gprb(s32) = COPY $a0
+    %1:gprb(s32) = COPY $a1
+    %2:gprb(s32) = COPY $a2
+    %3:gprb(s32) = COPY $a3
+    %5:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+    %4:gprb(p0) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 0)
+    %6:gprb(s32) = G_LOAD %4(p0) :: (load 4 from %ir.p)
+    $v0 = COPY %6(s32)
+    RetRA implicit $v0
+
+...
+---
+name: ret_ptr
+alignment: 2
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+  bb.1.entry:
+    liveins: $a0
+
+    ; MIPS32-LABEL: name: ret_ptr
+    ; MIPS32: liveins: $a0
+    ; MIPS32: [[COPY:%[0-9]+]]:gpr32 = COPY $a0
+    ; MIPS32: $v0 = COPY [[COPY]]
+    ; MIPS32: RetRA implicit $v0
+    %0:gprb(p0) = COPY $a0
+    $v0 = COPY %0(p0)
+    RetRA implicit $v0
+
+...
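
For readers unfamiliar with the LegalizerInfo rule builder, a minimal sketch (not part of the patch) of what the rule change in MipsLegalizerInfo.cpp means for G_LOAD and G_STORE; s32 and p0 are the definitions from the constructor above, the comments are illustrative only.

  // Before: exactly one {value type, address type} pair was legal, a 32-bit
  // scalar loaded from / stored to a 32-bit pointer in address space 0.
  getActionDefinitionsBuilder({G_LOAD, G_STORE})
      .legalFor({{s32, p0}});

  // After: the Cartesian product of {p0, s32} (type index 0, the value) with
  // {p0} (type index 1, the address) is legal, i.e. both {s32, p0} and
  // {p0, p0}. The second pair is what lets a pointer itself be loaded from or
  // stored to memory, which the tests in this patch exercise.
  getActionDefinitionsBuilder({G_LOAD, G_STORE})
      .legalForCartesianProduct({p0, s32}, {p0});
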
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/pointers.ll b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/pointers.ll
new file mode 100644
index 000000000000..41fe492e84e3
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/GlobalISel/irtranslator/pointers.ll
@@ -0,0 +1,45 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -stop-after=irtranslator -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
+
+
+define i32 @ptr_arg_in_regs(i32* %p) {
+  ; MIPS32-LABEL: name: ptr_arg_in_regs
+  ; MIPS32: bb.1.entry:
+  ; MIPS32: liveins: $a0
+  ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+  ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.p)
+  ; MIPS32: $v0 = COPY [[LOAD]](s32)
+  ; MIPS32: RetRA implicit $v0
+entry:
+  %0 = load i32, i32* %p
+  ret i32 %0
+}
+
+define i32 @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i32* %p) {
+  ; MIPS32-LABEL: name: ptr_arg_on_stack
+  ; MIPS32: bb.1.entry:
+  ; MIPS32: liveins: $a0, $a1, $a2, $a3
+  ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+  ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+  ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
+  ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
+  ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; MIPS32: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 0)
+  ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.p)
+  ; MIPS32: $v0 = COPY [[LOAD1]](s32)
+  ; MIPS32: RetRA implicit $v0
+entry:
+  %0 = load i32, i32* %p
+  ret i32 %0
+}
+
+define i8* @ret_ptr(i8* %p) {
+  ; MIPS32-LABEL: name: ret_ptr
+  ; MIPS32: bb.1.entry:
+  ; MIPS32: liveins: $a0
+  ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+  ; MIPS32: $v0 = COPY [[COPY]](p0)
+  ; MIPS32: RetRA implicit $v0
+entry:
+  ret i8* %p
+}
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir
new file mode 100644
index 000000000000..0dbeb55108a2
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir
@@ -0,0 +1,79 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
+--- |
+
+  define void @ptr_arg_in_regs(i32* %p) {entry: ret void}
+  define void @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i32* %p) {entry: ret void}
+  define void @ret_ptr(i8* %p) {entry: ret void}
+
+...
+---
+name: ptr_arg_in_regs
+alignment: 2
+tracksRegLiveness: true
+body: |
+  bb.1.entry:
+    liveins: $a0
+
+    ; MIPS32-LABEL: name: ptr_arg_in_regs
+    ; MIPS32: liveins: $a0
+    ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.p)
+    ; MIPS32: $v0 = COPY [[LOAD]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(p0) = COPY $a0
+    %1:_(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p)
+    $v0 = COPY %1(s32)
+    RetRA implicit $v0
+
+...
+---
+name: ptr_arg_on_stack
+alignment: 2
+tracksRegLiveness: true
+fixedStack:
+  - { id: 0, offset: 16, size: 4, alignment: 8, stack-id: 0, isImmutable: true }
+body: |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    ; MIPS32-LABEL: name: ptr_arg_on_stack
+    ; MIPS32: liveins: $a0, $a1, $a2, $a3
+    ; MIPS32: [[COPY:%[0-9]+]]:_(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:_(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
+    ; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
+    ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+    ; MIPS32: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 0)
+    ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.p)
+    ; MIPS32: $v0 = COPY [[LOAD1]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(s32) = COPY $a0
+    %1:_(s32) = COPY $a1
+    %2:_(s32) = COPY $a2
+    %3:_(s32) = COPY $a3
+    %5:_(p0) = G_FRAME_INDEX %fixed-stack.0
+    %4:_(p0) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 0)
+    %6:_(s32) = G_LOAD %4(p0) :: (load 4 from %ir.p)
+    $v0 = COPY %6(s32)
+    RetRA implicit $v0
+
+...
+---
+name: ret_ptr
+alignment: 2
+tracksRegLiveness: true
+body: |
+  bb.1.entry:
+    liveins: $a0
+
+    ; MIPS32-LABEL: name: ret_ptr
+    ; MIPS32: liveins: $a0
+    ; MIPS32: [[COPY:%[0-9]+]]:_(p0) = COPY $a0
+    ; MIPS32: $v0 = COPY [[COPY]](p0)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(p0) = COPY $a0
+    $v0 = COPY %0(p0)
+    RetRA implicit $v0
+
+...
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/pointers.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/pointers.ll
new file mode 100644
index 000000000000..b274167a5cb5
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/pointers.ll
@@ -0,0 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
+
+define i32 @ptr_arg_in_regs(i32* %p) {
+; MIPS32-LABEL: ptr_arg_in_regs:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: lw $2, 0($4)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+entry:
+  %0 = load i32, i32* %p
+  ret i32 %0
+}
+
+define i32 @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i32* %p) {
+; MIPS32-LABEL: ptr_arg_on_stack:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: addiu $1, $sp, 16
+; MIPS32-NEXT: lw $1, 0($1)
+; MIPS32-NEXT: lw $2, 0($1)
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+entry:
+  %0 = load i32, i32* %p
+  ret i32 %0
+}
+
+define i8* @ret_ptr(i8* %p) {
+; MIPS32-LABEL: ret_ptr:
+; MIPS32: # %bb.0: # %entry
+; MIPS32-NEXT: move $2, $4
+; MIPS32-NEXT: jr $ra
+; MIPS32-NEXT: nop
+entry:
+  ret i8* %p
+}
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir
new file mode 100644
index 000000000000..7c8dc0a19492
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir
@@ -0,0 +1,82 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
+--- |
+
+  define void @ptr_arg_in_regs(i32* %p) {entry: ret void}
+  define void @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i32* %p) {entry: ret void}
+  define void @ret_ptr(i8* %p) {entry: ret void}
+
+...
+---
+name: ptr_arg_in_regs
+alignment: 2
+legalized: true
+tracksRegLiveness: true
+body: |
+  bb.1.entry:
+    liveins: $a0
+
+    ; MIPS32-LABEL: name: ptr_arg_in_regs
+    ; MIPS32: liveins: $a0
+    ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; MIPS32: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.p)
+    ; MIPS32: $v0 = COPY [[LOAD]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(p0) = COPY $a0
+    %1:_(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p)
+    $v0 = COPY %1(s32)
+    RetRA implicit $v0
+
+...
+---
+name: ptr_arg_on_stack
+alignment: 2
+legalized: true
+tracksRegLiveness: true
+fixedStack:
+  - { id: 0, offset: 16, size: 4, alignment: 8, stack-id: 0, isImmutable: true }
+body: |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    ; MIPS32-LABEL: name: ptr_arg_on_stack
+    ; MIPS32: liveins: $a0, $a1, $a2, $a3
+    ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+    ; MIPS32: [[COPY3:%[0-9]+]]:gprb(s32) = COPY $a3
+    ; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+    ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 0)
+    ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.p)
+    ; MIPS32: $v0 = COPY [[LOAD1]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(s32) = COPY $a0
+    %1:_(s32) = COPY $a1
+    %2:_(s32) = COPY $a2
+    %3:_(s32) = COPY $a3
+    %5:_(p0) = G_FRAME_INDEX %fixed-stack.0
+    %4:_(p0) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 0)
+    %6:_(s32) = G_LOAD %4(p0) :: (load 4 from %ir.p)
+    $v0 = COPY %6(s32)
+    RetRA implicit $v0
+
+...
+---
+name: ret_ptr
+alignment: 2
+legalized: true
+tracksRegLiveness: true
+body: |
+  bb.1.entry:
+    liveins: $a0
+
+    ; MIPS32-LABEL: name: ret_ptr
+    ; MIPS32: liveins: $a0
+    ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; MIPS32: $v0 = COPY [[COPY]](p0)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(p0) = COPY $a0
+    $v0 = COPY %0(p0)
+    RetRA implicit $v0
+
+...
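
As a closing illustration (a standalone sketch mirroring the predicate in MipsCallLowering.cpp, not code taken from the patch beyond that), the calling-convention side of the change reduces to one extra case in the type filter shared by lowerReturn, lowerFormalArguments and lowerCall; on MIPS32/O32 a pointer is 32 bits wide and travels through the same GPR32 registers and stack slots as an i32, which is why the rest of the call lowering is untouched.

  #include "llvm/IR/Type.h"
  using namespace llvm;

  // Mirrors isSupportedType after this patch.
  static bool isSupportedType(Type *T) {
    // 32-bit integers were already supported.
    if (T->isIntegerTy() && T->getScalarSizeInBits() == 32)
      return true;
    // New: pointer arguments and return values (i32*, i8*, ...) are accepted;
    // the O32 convention assigns them exactly like 32-bit integers.
    if (T->isPointerTy())
      return true;
    return false;
  }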