diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
--- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -2464,7 +2464,7 @@
                               const TargetLibraryInfo *LibInfo) {
   // Only available on 64-bit ELF for now.
   const PPCSubtarget &Subtarget = FuncInfo.MF->getSubtarget<PPCSubtarget>();
-  if (Subtarget.isPPC64() && Subtarget.isSVR4ABI())
+  if (Subtarget.is64BitELFABI())
     return new PPCFastISel(FuncInfo, LibInfo);
   return nullptr;
 }
diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -5109,8 +5109,8 @@
   }
   case PPCISD::PPC32_PICGOT:
     // Generate a PIC-safe GOT reference.
-    assert(!PPCSubTarget->isPPC64() && PPCSubTarget->isSVR4ABI() &&
-           "PPCISD::PPC32_PICGOT is only supported for 32-bit SVR4");
+    assert(PPCSubTarget->is32BitELFABI() &&
+           "PPCISD::PPC32_PICGOT is only supported for 32-bit SVR4");
     CurDAG->SelectNodeTo(N, PPC::PPC32PICGOT,
                          PPCLowering->getPointerTy(CurDAG->getDataLayout()),
                          MVT::i32);
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -431,28 +431,26 @@
   // VASTART needs to be custom lowered to use the VarArgsFrameIndex
   setOperationAction(ISD::VASTART           , MVT::Other, Custom);
 
-  if (Subtarget.isSVR4ABI()) {
-    if (isPPC64) {
-      // VAARG always uses double-word chunks, so promote anything smaller.
-      setOperationAction(ISD::VAARG, MVT::i1, Promote);
-      AddPromotedToType (ISD::VAARG, MVT::i1, MVT::i64);
-      setOperationAction(ISD::VAARG, MVT::i8, Promote);
-      AddPromotedToType (ISD::VAARG, MVT::i8, MVT::i64);
-      setOperationAction(ISD::VAARG, MVT::i16, Promote);
-      AddPromotedToType (ISD::VAARG, MVT::i16, MVT::i64);
-      setOperationAction(ISD::VAARG, MVT::i32, Promote);
-      AddPromotedToType (ISD::VAARG, MVT::i32, MVT::i64);
-      setOperationAction(ISD::VAARG, MVT::Other, Expand);
-    } else {
-      // VAARG is custom lowered with the 32-bit SVR4 ABI.
-      setOperationAction(ISD::VAARG, MVT::Other, Custom);
-      setOperationAction(ISD::VAARG, MVT::i64, Custom);
-    }
+  if (Subtarget.is64BitELFABI()) {
+    // VAARG always uses double-word chunks, so promote anything smaller.
+    setOperationAction(ISD::VAARG, MVT::i1, Promote);
+    AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
+    setOperationAction(ISD::VAARG, MVT::i8, Promote);
+    AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
+    setOperationAction(ISD::VAARG, MVT::i16, Promote);
+    AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
+    setOperationAction(ISD::VAARG, MVT::i32, Promote);
+    AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
+    setOperationAction(ISD::VAARG, MVT::Other, Expand);
+  } else if (Subtarget.is32BitELFABI()) {
+    // VAARG is custom lowered with the 32-bit SVR4 ABI.
+    setOperationAction(ISD::VAARG, MVT::Other, Custom);
+    setOperationAction(ISD::VAARG, MVT::i64, Custom);
   } else
     setOperationAction(ISD::VAARG, MVT::Other, Expand);
 
-  if (Subtarget.isSVR4ABI() && !isPPC64)
-    // VACOPY is custom lowered with the 32-bit SVR4 ABI.
+  // VACOPY is custom lowered with the 32-bit SVR4 ABI.
+  if (Subtarget.is32BitELFABI())
     setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
   else
     setOperationAction(ISD::VACOPY            , MVT::Other, Expand);
@@ -2688,7 +2686,7 @@
 
   // 64-bit SVR4 ABI code is always position-independent.
   // The actual address of the GlobalValue is stored in the TOC.
-  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
+  if (Subtarget.is64BitELFABI()) {
     setUsesTOCBasePtr(DAG);
     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
     return getTOCEntry(DAG, SDLoc(CP), true, GA);
@@ -2764,7 +2762,7 @@
 
   // 64-bit SVR4 ABI code is always position-independent.
   // The actual address of the GlobalValue is stored in the TOC.
-  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
+  if (Subtarget.is64BitELFABI()) {
     setUsesTOCBasePtr(DAG);
     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
     return getTOCEntry(DAG, SDLoc(JT), true, GA);
@@ -2793,14 +2791,18 @@
 
   // 64-bit SVR4 ABI code is always position-independent.
   // The actual BlockAddress is stored in the TOC.
-  if (Subtarget.isSVR4ABI() &&
-      (Subtarget.isPPC64() || isPositionIndependent())) {
-    if (Subtarget.isPPC64())
-      setUsesTOCBasePtr(DAG);
+  if (Subtarget.is64BitELFABI()) {
+    setUsesTOCBasePtr(DAG);
     SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
     return getTOCEntry(DAG, SDLoc(BASDN), Subtarget.isPPC64(), GA);
   }
 
+  // 32-bit position-independent ELF stores the BlockAddress in the .got.
+  if (Subtarget.is32BitELFABI() && isPositionIndependent())
+    return getTOCEntry(
+        DAG, SDLoc(BASDN), false,
+        DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));
+
   unsigned MOHiFlag, MOLoFlag;
   bool IsPIC = isPositionIndependent();
   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
@@ -2915,7 +2917,7 @@
 
   // 64-bit SVR4 ABI code is always position-independent.
   // The actual address of the GlobalValue is stored in the TOC.
-  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
+  if (Subtarget.is64BitELFABI()) {
     setUsesTOCBasePtr(DAG);
     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
     return getTOCEntry(DAG, DL, true, GA);
@@ -3377,17 +3379,17 @@
     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
-  if (Subtarget.isSVR4ABI()) {
-    if (Subtarget.isPPC64())
-      return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
-                                         dl, DAG, InVals);
-    else
-      return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
-                                         dl, DAG, InVals);
-  } else {
-    return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
-                                       dl, DAG, InVals);
-  }
+  if (Subtarget.is64BitELFABI())
+    return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
+                                       InVals);
+  else if (Subtarget.is32BitELFABI())
+    return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
+                                       InVals);
+
+  // We are using this for both AIX and Darwin. If this is intentional then we
+  // should rename it appropriately.
+  return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, dl, DAG,
+                                     InVals);
 }
 
 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
@@ -4516,7 +4518,7 @@
 static bool
 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
-  assert(Subtarget.isSVR4ABI() && Subtarget.isPPC64());
+  assert(Subtarget.is64BitELFABI());
 
   const unsigned PtrByteSize = 8;
   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
@@ -4926,7 +4928,7 @@
                             ImmutableCallSite CS, const PPCSubtarget &Subtarget) {
   bool isPPC64 = Subtarget.isPPC64();
   bool isSVR4ABI = Subtarget.isSVR4ABI();
-  bool isELFv2ABI = Subtarget.isELFv2ABI();
+  bool isELFv1ABI = isPPC64 && isSVR4ABI && !Subtarget.isELFv2ABI();
   bool isAIXABI = Subtarget.isAIXABI();
 
   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
@@ -4997,7 +4999,7 @@
     // to do the call, we can't use PPCISD::CALL.
     SDValue MTCTROps[] = {Chain, Callee, InFlag};
 
-    if (isSVR4ABI && isPPC64 && !isELFv2ABI) {
+    if (isELFv1ABI) {
       // Function pointers in the 64-bit SVR4 ABI do not point to the function
       // entry point, but to the function descriptor (the function entry point
       // address is part of the function descriptor though).
@@ -5085,7 +5087,7 @@
     CallOpc = PPCISD::BCTRL;
     Callee.setNode(nullptr);
     // Add use of X11 (holding environment pointer)
-    if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest)
+    if (isELFv1ABI && !hasNest)
       Ops.push_back(DAG.getRegister(PPC::X11, PtrVT));
     // Add CTR register as callee so a bctr can be emitted later.
     if (isTailCall)
@@ -10485,7 +10487,7 @@
   unsigned LabelReg = MRI.createVirtualRegister(PtrRC);
   unsigned BufReg = MI.getOperand(1).getReg();
 
-  if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
+  if (Subtarget.is64BitELFABI()) {
     setUsesTOCBasePtr(*MBB->getParent());
     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
               .addReg(PPC::X2)
@@ -10662,7 +10664,7 @@
                                                MachineBasicBlock *BB) const {
   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
-    if (Subtarget.isPPC64() && Subtarget.isSVR4ABI() &&
+    if (Subtarget.is64BitELFABI() &&
         MI.getOpcode() == TargetOpcode::PATCHPOINT) {
       // Call lowering should have added an r2 operand to indicate a dependence
       // on the TOC base pointer value. It can't however, because there is no
@@ -14330,7 +14332,7 @@
 
 bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
   // 32-bit SVR4 ABI access everything as got-indirect.
-  if (Subtarget.isSVR4ABI() && !Subtarget.isPPC64())
+  if (Subtarget.is32BitELFABI())
     return true;
 
   CodeModel::Model CModel = getTargetMachine().getCodeModel();
@@ -15146,7 +15148,7 @@
 
 bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
   // Only duplicate to increase tail-calls for the 64bit SysV ABIs.
-  if (!Subtarget.isSVR4ABI() || !Subtarget.isPPC64())
+  if (!Subtarget.is64BitELFABI())
     return false;
 
   // If not a tail call then no need to proceed.
diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
--- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -325,13 +325,13 @@
 
   bool IsPositionIndependent = TM.isPositionIndependent();
   if (hasBasePointer(MF)) {
-    if (Subtarget.isSVR4ABI() && !TM.isPPC64() && IsPositionIndependent)
+    if (Subtarget.is32BitELFABI() && IsPositionIndependent)
       markSuperRegs(Reserved, PPC::R29);
     else
       markSuperRegs(Reserved, PPC::R30);
   }
 
-  if (Subtarget.isSVR4ABI() && !TM.isPPC64() && IsPositionIndependent)
+  if (Subtarget.is32BitELFABI() && IsPositionIndependent)
     markSuperRegs(Reserved, PPC::R30);
 
   // Reserve Altivec registers when Altivec is unavailable.
diff --git a/llvm/lib/Target/PowerPC/PPCRegisterInfo.td b/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
--- a/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
+++ b/llvm/lib/Target/PowerPC/PPCRegisterInfo.td
@@ -260,8 +260,7 @@
   // put it at the end of the list.
   let AltOrders = [(add (sub GPRC, R2), R2)];
   let AltOrderSelect = [{
-    const PPCSubtarget &S = MF.getSubtarget<PPCSubtarget>();
-    return S.isPPC64() && S.isSVR4ABI();
+    return MF.getSubtarget<PPCSubtarget>().is64BitELFABI();
   }];
 }
 
@@ -272,8 +271,7 @@
   // put it at the end of the list.
   let AltOrders = [(add (sub G8RC, X2), X2)];
   let AltOrderSelect = [{
-    const PPCSubtarget &S = MF.getSubtarget<PPCSubtarget>();
-    return S.isPPC64() && S.isSVR4ABI();
+    return MF.getSubtarget<PPCSubtarget>().is64BitELFABI();
   }];
 }
 
@@ -285,8 +283,7 @@
   // put it at the end of the list.
   let AltOrders = [(add (sub GPRC_NOR0, R2), R2)];
   let AltOrderSelect = [{
-    const PPCSubtarget &S = MF.getSubtarget<PPCSubtarget>();
-    return S.isPPC64() && S.isSVR4ABI();
+    return MF.getSubtarget<PPCSubtarget>().is64BitELFABI();
  }];
 }
 
@@ -295,8 +292,7 @@
   // put it at the end of the list.
   let AltOrders = [(add (sub G8RC_NOX0, X2), X2)];
   let AltOrderSelect = [{
-    const PPCSubtarget &S = MF.getSubtarget<PPCSubtarget>();
-    return S.isPPC64() && S.isSVR4ABI();
+    return MF.getSubtarget<PPCSubtarget>().is64BitELFABI();
   }];
 }
diff --git a/llvm/lib/Target/PowerPC/PPCSubtarget.h b/llvm/lib/Target/PowerPC/PPCSubtarget.h
--- a/llvm/lib/Target/PowerPC/PPCSubtarget.h
+++ b/llvm/lib/Target/PowerPC/PPCSubtarget.h
@@ -316,6 +316,9 @@
   bool isSVR4ABI() const { return !isDarwinABI() && !isAIXABI(); }
   bool isELFv2ABI() const;
 
+  bool is64BitELFABI() const { return isSVR4ABI() && isPPC64(); }
+  bool is32BitELFABI() const { return isSVR4ABI() && !isPPC64(); }
+
   /// Originally, this function return hasISEL(). Now we always enable it,
   /// but may expand the ISEL instruction later.
   bool enableEarlyIfConversion() const override { return true; }