diff --git a/llvm/include/llvm/CodeGen/MachineFunction.h b/llvm/include/llvm/CodeGen/MachineFunction.h
--- a/llvm/include/llvm/CodeGen/MachineFunction.h
+++ b/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -819,12 +819,15 @@
       AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
 
   /// FIXME: Remove once transition to Align is over.
-  inline MachineMemOperand *getMachineMemOperand(
-      MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
-      unsigned base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
-      const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System,
-      AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
-      AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic) {
+  LLVM_ATTRIBUTE_DEPRECATED(
+      inline MachineMemOperand *getMachineMemOperand(
+          MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
+          unsigned base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
+          const MDNode *Ranges = nullptr,
+          SyncScope::ID SSID = SyncScope::System,
+          AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
+          AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic),
+      "Use the version that takes Align instead") {
     return getMachineMemOperand(PtrInfo, f, s, Align(base_alignment), AAInfo,
                                 Ranges, SSID, Ordering, FailureOrdering);
   }
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -6679,19 +6679,17 @@
 
 SDValue SelectionDAG::getMemIntrinsicNode(
     unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
-    EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align,
+    EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment,
     MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) {
-  if (Align == 0) // Ensure that codegen never sees alignment 0
-    Align = getEVTAlignment(MemVT);
-
   if (!Size && MemVT.isScalableVector())
     Size = MemoryLocation::UnknownSize;
   else if (!Size)
     Size = MemVT.getStoreSize();
 
   MachineFunction &MF = getMachineFunction();
-  MachineMemOperand *MMO =
-      MF.getMachineMemOperand(PtrInfo, Flags, Size, Align, AAInfo);
+  MachineMemOperand *MMO = MF.getMachineMemOperand(
+      PtrInfo, Flags, Size, Alignment ? Align(Alignment) : getEVTAlign(MemVT),
+      AAInfo);
 
   return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
 }
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4173,25 +4173,26 @@
                                            bool IsCompressing) {
   SDLoc sdl = getCurSDLoc();
 
-  auto getMaskedStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
-                               unsigned& Alignment) {
+  auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
+                               MaybeAlign &Alignment) {
     // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
     Src0 = I.getArgOperand(0);
     Ptr = I.getArgOperand(1);
-    Alignment = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
+    Alignment =
+        MaybeAlign(cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
     Mask = I.getArgOperand(3);
   };
-  auto getCompressingStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
-                                    unsigned& Alignment) {
+  auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
+                                    MaybeAlign &Alignment) {
     // llvm.masked.compressstore.*(Src0, Ptr, Mask)
     Src0 = I.getArgOperand(0);
     Ptr = I.getArgOperand(1);
     Mask = I.getArgOperand(2);
-    Alignment = 0;
+    Alignment = None;
   };
 
   Value *PtrOperand, *MaskOperand, *Src0Operand;
-  unsigned Alignment;
+  MaybeAlign Alignment;
   if (IsCompressing)
     getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
   else
@@ -4204,19 +4205,16 @@
 
   EVT VT = Src0.getValueType();
   if (!Alignment)
-    Alignment = DAG.getEVTAlignment(VT);
+    Alignment = DAG.getEVTAlign(VT);
 
   AAMDNodes AAInfo;
   I.getAAMetadata(AAInfo);
 
-  MachineMemOperand *MMO =
-    DAG.getMachineFunction().
-    getMachineMemOperand(MachinePointerInfo(PtrOperand),
-                         MachineMemOperand::MOStore,
-                         // TODO: Make MachineMemOperands aware of scalable
-                         // vectors.
-                         VT.getStoreSize().getKnownMinSize(),
-                         Alignment, AAInfo);
+  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
+      MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
+      // TODO: Make MachineMemOperands aware of scalable
+      // vectors.
+      VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo);
   SDValue StoreNode =
       DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
                          ISD::UNINDEXED, false /* Truncating */, IsCompressing);
@@ -4316,9 +4314,9 @@
   SDValue Src0 = getValue(I.getArgOperand(0));
   SDValue Mask = getValue(I.getArgOperand(3));
   EVT VT = Src0.getValueType();
-  unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
+  MaybeAlign Alignment(cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
   if (!Alignment)
-    Alignment = DAG.getEVTAlignment(VT);
+    Alignment = DAG.getEVTAlign(VT);
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
 
   AAMDNodes AAInfo;
@@ -4331,13 +4329,11 @@
   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this);
 
   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
-  MachineMemOperand *MMO = DAG.getMachineFunction().
-    getMachineMemOperand(MachinePointerInfo(AS),
-                         MachineMemOperand::MOStore,
-                         // TODO: Make MachineMemOperands aware of scalable
-                         // vectors.
-                         MemoryLocation::UnknownSize,
-                         Alignment, AAInfo);
+  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
+      MachinePointerInfo(AS), MachineMemOperand::MOStore,
+      // TODO: Make MachineMemOperands aware of scalable
+      // vectors.
+      MemoryLocation::UnknownSize, *Alignment, AAInfo);
   if (!UniformBase) {
     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
     Index = getValue(Ptr);
@@ -4354,25 +4350,26 @@
 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
   SDLoc sdl = getCurSDLoc();
 
-  auto getMaskedLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
-                              unsigned& Alignment) {
+  auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
+                              MaybeAlign &Alignment) {
     // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
     Ptr = I.getArgOperand(0);
-    Alignment = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
+    Alignment =
+        MaybeAlign(cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
     Mask = I.getArgOperand(2);
     Src0 = I.getArgOperand(3);
   };
-  auto getExpandingLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
-                                 unsigned& Alignment) {
+  auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
+                                 MaybeAlign &Alignment) {
     // @llvm.masked.expandload.*(Ptr, Mask, Src0)
     Ptr = I.getArgOperand(0);
-    Alignment = 0;
+    Alignment = None;
     Mask = I.getArgOperand(1);
     Src0 = I.getArgOperand(2);
   };
 
   Value *PtrOperand, *MaskOperand, *Src0Operand;
-  unsigned Alignment;
+  MaybeAlign Alignment;
   if (IsExpanding)
     getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
   else
@@ -4385,7 +4382,7 @@
 
   EVT VT = Src0.getValueType();
   if (!Alignment)
-    Alignment = DAG.getEVTAlignment(VT);
+    Alignment = DAG.getEVTAlign(VT);
 
   AAMDNodes AAInfo;
   I.getAAMetadata(AAInfo);
@@ -4403,14 +4400,11 @@
 
   SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
 
-  MachineMemOperand *MMO =
-    DAG.getMachineFunction().
-    getMachineMemOperand(MachinePointerInfo(PtrOperand),
-                         MachineMemOperand::MOLoad,
-                         // TODO: Make MachineMemOperands aware of scalable
-                         // vectors.
-                         VT.getStoreSize().getKnownMinSize(),
-                         Alignment, AAInfo, Ranges);
+  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
+      MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
+      // TODO: Make MachineMemOperands aware of scalable
+      // vectors.
+      VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo, Ranges);
 
   SDValue Load =
       DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
@@ -4430,9 +4424,9 @@
 
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
-  unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
+  MaybeAlign Alignment(cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
   if (!Alignment)
-    Alignment = DAG.getEVTAlignment(VT);
+    Alignment = DAG.getEVTAlign(VT);
 
   AAMDNodes AAInfo;
   I.getAAMetadata(AAInfo);
@@ -4445,14 +4439,11 @@
   SDValue Scale;
   bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this);
   unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
-  MachineMemOperand *MMO =
-    DAG.getMachineFunction().
-    getMachineMemOperand(MachinePointerInfo(AS),
-                         MachineMemOperand::MOLoad,
-                         // TODO: Make MachineMemOperands aware of scalable
-                         // vectors.
-                         MemoryLocation::UnknownSize,
-                         Alignment, AAInfo, Ranges);
+  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
+      MachinePointerInfo(AS), MachineMemOperand::MOLoad,
+      // TODO: Make MachineMemOperands aware of scalable
+      // vectors.
+      MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
 
   if (!UniformBase) {
     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
@@ -4479,16 +4470,14 @@
   MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
   SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
 
-  auto Alignment = DAG.getEVTAlignment(MemVT);
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
 
   MachineFunction &MF = DAG.getMachineFunction();
-  MachineMemOperand *MMO =
-      MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
-                              Flags, MemVT.getStoreSize(), Alignment,
-                              AAMDNodes(), nullptr, SSID, SuccessOrdering,
-                              FailureOrdering);
+  MachineMemOperand *MMO = MF.getMachineMemOperand(
+      MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
+      DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering,
+      FailureOrdering);
 
   SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, MemVT,
                                    VTs, InChain,
@@ -4527,15 +4516,13 @@
   SDValue InChain = getRoot();
 
   auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
-  auto Alignment = DAG.getEVTAlignment(MemVT);
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
 
   MachineFunction &MF = DAG.getMachineFunction();
-  MachineMemOperand *MMO =
-      MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), Flags,
-                              MemVT.getStoreSize(), Alignment, AAMDNodes(),
-                              nullptr, SSID, Ordering);
+  MachineMemOperand *MMO = MF.getMachineMemOperand(
+      MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
+      DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, Ordering);
 
   SDValue L =
     DAG.getAtomic(NT, dl, MemVT, InChain,
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -2176,32 +2176,33 @@
     // Rationale: This sequence saves uop updates compared to a sequence of
    // pre-increment spills like stp xi,xj,[sp,#-16]!
    // Note: Similar rationale and sequence for restores in epilog.
-    unsigned Size, Align;
+    unsigned Size;
+    Align Alignment;
     switch (RPI.Type) {
     case RegPairInfo::GPR:
       StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
       Size = 8;
-      Align = 8;
+      Alignment = Align(8);
       break;
     case RegPairInfo::FPR64:
       StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
       Size = 8;
-      Align = 8;
+      Alignment = Align(8);
       break;
     case RegPairInfo::FPR128:
      StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui;
       Size = 16;
-      Align = 16;
+      Alignment = Align(16);
       break;
     case RegPairInfo::ZPR:
       StrOpc = AArch64::STR_ZXI;
       Size = 16;
-      Align = 16;
+      Alignment = Align(16);
       break;
     case RegPairInfo::PPR:
       StrOpc = AArch64::STR_PXI;
       Size = 2;
-      Align = 2;
+      Alignment = Align(2);
       break;
     }
     LLVM_DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI);
@@ -2230,7 +2231,7 @@
       MIB.addReg(Reg2, getPrologueDeath(MF, Reg2));
       MIB.addMemOperand(MF.getMachineMemOperand(
           MachinePointerInfo::getFixedStack(MF, FrameIdxReg2),
-          MachineMemOperand::MOStore, Size, Align));
+          MachineMemOperand::MOStore, Size, Alignment));
     }
     MIB.addReg(Reg1, getPrologueDeath(MF, Reg1))
         .addReg(AArch64::SP)
@@ -2238,8 +2239,8 @@
         .addImm(RPI.Offset) // [sp, #offset*scale],
                             // where factor*scale is implicit
         .setMIFlag(MachineInstr::FrameSetup);
     MIB.addMemOperand(MF.getMachineMemOperand(
-        MachinePointerInfo::getFixedStack(MF,FrameIdxReg1),
-        MachineMemOperand::MOStore, Size, Align));
+        MachinePointerInfo::getFixedStack(MF, FrameIdxReg1),
+        MachineMemOperand::MOStore, Size, Alignment));
     if (NeedsWinCFI)
       InsertSEH(MIB, TII, MachineInstr::FrameSetup);
@@ -2281,32 +2282,33 @@
   // ldp x22, x21, [sp, #0] // addImm(+0)
   // Note: see comment in spillCalleeSavedRegisters()
   unsigned LdrOpc;
-    unsigned Size, Align;
+    unsigned Size;
+    Align Alignment;
     switch (RPI.Type) {
     case RegPairInfo::GPR:
       LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
       Size = 8;
-      Align = 8;
+      Alignment = Align(8);
       break;
     case RegPairInfo::FPR64:
       LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
       Size = 8;
-      Align = 8;
+      Alignment = Align(8);
       break;
     case RegPairInfo::FPR128:
       LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui;
       Size = 16;
-      Align = 16;
+      Alignment = Align(16);
       break;
     case RegPairInfo::ZPR:
       LdrOpc = AArch64::LDR_ZXI;
       Size = 16;
-      Align = 16;
+      Alignment = Align(16);
       break;
     case RegPairInfo::PPR:
       LdrOpc = AArch64::LDR_PXI;
       Size = 2;
-      Align = 2;
+      Alignment = Align(2);
       break;
     }
     LLVM_DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI);
@@ -2329,7 +2331,7 @@
       MIB.addReg(Reg2, getDefRegState(true));
       MIB.addMemOperand(MF.getMachineMemOperand(
           MachinePointerInfo::getFixedStack(MF, FrameIdxReg2),
-          MachineMemOperand::MOLoad, Size, Align));
+          MachineMemOperand::MOLoad, Size, Alignment));
     }
     MIB.addReg(Reg1, getDefRegState(true))
         .addReg(AArch64::SP)
@@ -2338,7 +2340,7 @@
         .setMIFlag(MachineInstr::FrameDestroy);
     MIB.addMemOperand(MF.getMachineMemOperand(
         MachinePointerInfo::getFixedStack(MF, FrameIdxReg1),
-        MachineMemOperand::MOLoad, Size, Align));
+        MachineMemOperand::MOLoad, Size, Alignment));
     if (NeedsWinCFI)
       InsertSEH(MIB, TII, MachineInstr::FrameDestroy);
   };
diff --git a/llvm/lib/Target/ARM/ARMInstructionSelector.cpp b/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
--- a/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
+++ b/llvm/lib/Target/ARM/ARMInstructionSelector.cpp
@@ -627,7 +627,7 @@
   bool UseMovt = STI.useMovt();
 
   unsigned Size = TM.getPointerSize(0);
-  unsigned Alignment = 4;
+  const Align Alignment(4);
 
   auto addOpsForConstantPoolLoad = [&MF, Alignment, Size](
                                        MachineInstrBuilder &MIB,
@@ -639,10 +639,10 @@
     auto CPIndex =
         // For SB relative entries we need a target-specific constant pool.
         // Otherwise, just use a regular constant pool entry.
-        IsSBREL
-            ? ConstPool->getConstantPoolIndex(
-                  ARMConstantPoolConstant::Create(GV, ARMCP::SBREL), Alignment)
-            : ConstPool->getConstantPoolIndex(GV, Alignment);
+        IsSBREL ? ConstPool->getConstantPoolIndex(
+                      ARMConstantPoolConstant::Create(GV, ARMCP::SBREL),
+                      Alignment.value())
+                : ConstPool->getConstantPoolIndex(GV, Alignment.value());
     MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0)
         .addMemOperand(MF.getMachineMemOperand(
             MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
@@ -990,14 +990,14 @@
   case G_FCONSTANT: {
     // Load from constant pool
     unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits() / 8;
-    unsigned Alignment = Size;
+    Align Alignment(Size);
 
     assert((Size == 4 || Size == 8) && "Unsupported FP constant type");
     auto LoadOpcode = Size == 4 ? ARM::VLDRS : ARM::VLDRD;
 
     auto ConstPool = MF.getConstantPool();
-    auto CPIndex =
-        ConstPool->getConstantPoolIndex(I.getOperand(1).getFPImm(), Alignment);
+    auto CPIndex = ConstPool->getConstantPoolIndex(I.getOperand(1).getFPImm(),
+                                                   Alignment.value());
     MIB->setDesc(TII.get(LoadOpcode));
     MIB->RemoveOperand(1);
     MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0)
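
For readers unfamiliar with the Align/MaybeAlign types this patch converts to,
here is a minimal standalone sketch (not part of the patch) of the idiom the
hunks above rely on; it assumes an LLVM checkout of roughly this vintage and
links against LLVMSupport:

    #include "llvm/Support/Alignment.h"
    #include <cassert>
    using llvm::Align;
    using llvm::MaybeAlign;

    int main() {
      // A zero value decays to an empty MaybeAlign, so the legacy
      // "0 means use the type's natural alignment" convention becomes an
      // explicit emptiness test instead of a magic number.
      MaybeAlign MA(0);
      assert(!MA);

      // A non-zero value round-trips intact; Align is a non-zero power of
      // two by construction, and *MA recovers the underlying Align. This
      // mirrors the `Alignment ? Align(Alignment) : getEVTAlign(MemVT)` and
      // `*Alignment` uses in the hunks above.
      MA = MaybeAlign(16);
      assert(MA && *MA == Align(16));
      return 0;
    }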