diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -1114,14 +1114,36 @@
   /// INTRINSIC_W_CHAIN, or a target-specific opcode with a value not
   /// less than FIRST_TARGET_MEMORY_OPCODE.
   SDValue getMemIntrinsicNode(
-      unsigned Opcode, const SDLoc &dl, SDVTList VTList,
-      ArrayRef<SDValue> Ops, EVT MemVT,
-      MachinePointerInfo PtrInfo,
-      unsigned Align = 0,
-      MachineMemOperand::Flags Flags
-        = MachineMemOperand::MOLoad | MachineMemOperand::MOStore,
-      uint64_t Size = 0,
-      const AAMDNodes &AAInfo = AAMDNodes());
+      unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
+      EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
+      MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
+                                       MachineMemOperand::MOStore,
+      uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes());
+
+  inline SDValue getMemIntrinsicNode(
+      unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
+      EVT MemVT, MachinePointerInfo PtrInfo, MaybeAlign Alignment = None,
+      MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
+                                       MachineMemOperand::MOStore,
+      uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes()) {
+    // Ensure that codegen never sees alignment 0.
+    return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, PtrInfo,
+                               Alignment.getValueOr(getEVTAlign(MemVT)), Flags,
+                               Size, AAInfo);
+  }
+
+  LLVM_ATTRIBUTE_DEPRECATED(
+      inline SDValue getMemIntrinsicNode(
+          unsigned Opcode, const SDLoc &dl, SDVTList VTList,
+          ArrayRef<SDValue> Ops, EVT MemVT, MachinePointerInfo PtrInfo,
+          unsigned Alignment,
+          MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
+                                           MachineMemOperand::MOStore,
+          uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes()),
+      "") {
+    return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, PtrInfo,
+                               MaybeAlign(Alignment), Flags, Size, AAInfo);
+  }

   SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList,
                               ArrayRef<SDValue> Ops, EVT MemVT,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -6679,7 +6679,7 @@
 SDValue SelectionDAG::getMemIntrinsicNode(
     unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
-    EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment,
+    EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
     MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) {
   if (!Size && MemVT.isScalableVector())
     Size = MemoryLocation::UnknownSize;
@@ -6687,9 +6687,8 @@
     Size = MemVT.getStoreSize();

   MachineFunction &MF = getMachineFunction();
-  MachineMemOperand *MMO = MF.getMachineMemOperand(
-      PtrInfo, Flags, Size, Alignment ? Align(Alignment) : getEVTAlign(MemVT),
-      AAInfo);
+  MachineMemOperand *MMO =
+      MF.getMachineMemOperand(PtrInfo, Flags, Size, Alignment, AAInfo);

   return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
 }
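For reference, a minimal standalone sketch of the fallback behavior the new `MaybeAlign` overload guarantees. `resolve()` and the `Align(4)` fallback are illustrative stand-ins for the real `getMemIntrinsicNode`/`getEVTAlign(MemVT)` pair, not LLVM API:

```cpp
#include "llvm/Support/Alignment.h"
#include <cassert>

using llvm::Align;
using llvm::MaybeAlign;

static Align evtAlignFallback() { return Align(4); } // stand-in for getEVTAlign

static Align resolve(MaybeAlign Alignment) {
  // Mirrors the inline overload: an unset alignment falls back to the
  // EVT-derived alignment, so codegen never sees alignment 0.
  return Alignment.getValueOr(evtAlignFallback());
}

int main() {
  assert(resolve(MaybeAlign()) == Align(4));  // None -> type-based fallback
  assert(resolve(Align(16)) == Align(16));    // explicit alignment wins
  assert(resolve(MaybeAlign(0)) == Align(4)); // legacy 0 means "unspecified"
  return 0;
}
```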
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4713,10 +4713,10 @@
     // This is a target intrinsic that touches memory
     AAMDNodes AAInfo;
     I.getAAMetadata(AAInfo);
-    Result = DAG.getMemIntrinsicNode(
-        Info.opc, getCurSDLoc(), VTs, Ops, Info.memVT,
-        MachinePointerInfo(Info.ptrVal, Info.offset),
-        Info.align ? Info.align->value() : 0, Info.flags, Info.size, AAInfo);
+    Result =
+        DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops, Info.memVT,
+                                MachinePointerInfo(Info.ptrVal, Info.offset),
+                                Info.align, Info.flags, Info.size, AAInfo);
   } else if (!HasChain) {
     Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
   } else if (!I.getType()->isVoidTy()) {
@@ -6529,12 +6529,10 @@
     Ops[2] = getValue(I.getArgOperand(1));
     Ops[3] = getValue(I.getArgOperand(2));
     Ops[4] = getValue(I.getArgOperand(3));
-    SDValue Result = DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl,
-                                             DAG.getVTList(MVT::Other), Ops,
-                                             EVT::getIntegerVT(*Context, 8),
-                                             MachinePointerInfo(I.getArgOperand(0)),
-                                             0, /* align */
-                                             Flags);
+    SDValue Result = DAG.getMemIntrinsicNode(
+        ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops,
+        EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)),
+        /* align */ None, Flags);

     // Chain the prefetch in parallel with any pending loads, to stay out of
     // the way of later optimizations.
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.h
@@ -491,8 +491,7 @@
   std::string getPrototype(const DataLayout &DL, Type *, const ArgListTy &,
                            const SmallVectorImpl<ISD::OutputArg> &,
-                           unsigned retAlignment,
-                           ImmutableCallSite CS) const;
+                           MaybeAlign retAlignment, ImmutableCallSite CS) const;

   SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::OutputArg> &Outs,
@@ -579,8 +578,8 @@
                              SelectionDAG &DAG) const override;
   SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

-  unsigned getArgumentAlignment(SDValue Callee, ImmutableCallSite CS, Type *Ty,
-                                unsigned Idx, const DataLayout &DL) const;
+  Align getArgumentAlignment(SDValue Callee, ImmutableCallSite CS, Type *Ty,
+                             unsigned Idx, const DataLayout &DL) const;
 };
 } // namespace llvm
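The SelectionDAGBuilder hunk above works because `TargetLowering::IntrinsicInfo::align` is already a `MaybeAlign`, so the old unpack-to-unsigned dance round-trips losslessly and can simply be dropped. A small sketch (values illustrative):

```cpp
#include "llvm/Support/Alignment.h"
#include <cassert>
#include <cstdint>

using llvm::Align;
using llvm::MaybeAlign;

int main() {
  MaybeAlign A;                         // intrinsic without alignment info
  uint64_t Legacy = A ? A->value() : 0; // what the old call site passed
  assert(!MaybeAlign(Legacy));          // 0 re-wraps to "unset", not Align(0)

  A = Align(32);                        // intrinsic with alignment info
  Legacy = A ? A->value() : 0;
  MaybeAlign RoundTripped(Legacy);
  assert(RoundTripped && RoundTripped->value() == 32); // lossless round-trip
  return 0;
}
```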
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -218,11 +218,10 @@
 // covered by the vector op. Otherwise, it returns 1.
 static unsigned CanMergeParamLoadStoresStartingAt(
     unsigned Idx, uint32_t AccessSize, const SmallVectorImpl<EVT> &ValueVTs,
-    const SmallVectorImpl<uint64_t> &Offsets, unsigned ParamAlignment) {
-  assert(isPowerOf2_32(AccessSize) && "must be a power of 2!");
+    const SmallVectorImpl<uint64_t> &Offsets, Align ParamAlignment) {

   // Can't vectorize if param alignment is not sufficient.
-  if (AccessSize > ParamAlignment)
+  if (ParamAlignment < AccessSize)
     return 1;
   // Can't vectorize if offset is not aligned.
   if (Offsets[Idx] & (AccessSize - 1))
@@ -282,7 +281,7 @@
 static SmallVector<ParamVectorizationFlags, 16>
 VectorizePTXValueVTs(const SmallVectorImpl<EVT> &ValueVTs,
                      const SmallVectorImpl<uint64_t> &Offsets,
-                     unsigned ParamAlignment) {
+                     Align ParamAlignment) {
   // Set vector size to match ValueVTs and mark all elements as
   // scalars by default.
   SmallVector<ParamVectorizationFlags, 16> VectorInfo;
@@ -1243,7 +1242,7 @@
 std::string NVPTXTargetLowering::getPrototype(
     const DataLayout &DL, Type *retTy, const ArgListTy &Args,
-    const SmallVectorImpl<ISD::OutputArg> &Outs, unsigned retAlignment,
+    const SmallVectorImpl<ISD::OutputArg> &Outs, MaybeAlign retAlignment,
     ImmutableCallSite CS) const {
   auto PtrVT = getPointerTy(DL);

@@ -1279,8 +1278,8 @@
       O << ".param .b" << PtrVT.getSizeInBits() << " _";
     } else if (retTy->isAggregateType() || retTy->isVectorTy() ||
                retTy->isIntegerTy(128)) {
-      O << ".param .align " << retAlignment << " .b8 _["
-        << DL.getTypeAllocSize(retTy) << "]";
+      O << ".param .align " << (retAlignment ? retAlignment->value() : 0)
+        << " .b8 _[" << DL.getTypeAllocSize(retTy) << "]";
     } else {
       llvm_unreachable("Unknown return type");
     }
@@ -1353,16 +1352,16 @@
   return O.str();
 }

-unsigned NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
-                                                   ImmutableCallSite CS,
-                                                   Type *Ty, unsigned Idx,
-                                                   const DataLayout &DL) const {
+Align NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
+                                                ImmutableCallSite CS, Type *Ty,
+                                                unsigned Idx,
+                                                const DataLayout &DL) const {
   if (!CS) {
     // CallSite is zero, fallback to ABI type alignment
-    return DL.getABITypeAlignment(Ty);
+    return DL.getABITypeAlign(Ty);
   }

-  unsigned Align = 0;
+  unsigned Alignment = 0;
   const Value *DirectCallee = CS.getCalledFunction();

   if (!DirectCallee) {
@@ -1374,8 +1373,8 @@
     // With bitcast'd call targets, the instruction will be the call
     if (isa<CallInst>(CalleeI)) {
       // Check if we have call alignment metadata
-      if (getAlign(*cast<CallInst>(CalleeI), Idx, Align))
-        return Align;
+      if (getAlign(*cast<CallInst>(CalleeI), Idx, Alignment))
+        return Align(Alignment);

       const Value *CalleeV = cast<CallInst>(CalleeI)->getCalledValue();
       // Ignore any bitcast instructions
@@ -1397,12 +1396,12 @@
   // Check for function alignment information if we found that the
   // ultimate target is a Function
   if (DirectCallee)
-    if (getAlign(*cast<Function>(DirectCallee), Idx, Align))
-      return Align;
+    if (getAlign(*cast<Function>(DirectCallee), Idx, Alignment))
+      return Align(Alignment);

   // Call is indirect or alignment information is not available, fall back to
   // the ABI type alignment
-  return DL.getABITypeAlignment(Ty);
+  return DL.getABITypeAlign(Ty);
 }

 SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
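The dropped `isPowerOf2_32` assert in the `CanMergeParamLoadStoresStartingAt` hunk above is intentional: `llvm::Align` can only hold a power of two by construction, and it compares directly against plain integers. A sketch (values illustrative):

```cpp
#include "llvm/Support/Alignment.h"
#include <cassert>
#include <cstdint>

using llvm::Align;

int main() {
  Align ParamAlignment(8); // Align(6) would assert: not a power of two
  uint32_t AccessSize = 16;
  assert(ParamAlignment < AccessSize); // 8 < 16: alignment insufficient
  assert(!(Align(32) < AccessSize));   // 32 >= 16: alignment suffices
  return 0;
}
```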
@@ -1450,15 +1449,14 @@
       SmallVector<EVT, 16> VTs;
       SmallVector<uint64_t, 16> Offsets;
       ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets);
-      unsigned ArgAlign =
-          getArgumentAlignment(Callee, CS, Ty, paramCount + 1, DL);
+      Align ArgAlign = getArgumentAlignment(Callee, CS, Ty, paramCount + 1, DL);
       unsigned AllocSize = DL.getTypeAllocSize(Ty);
       SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
       bool NeedAlign; // Does argument declaration specify alignment?
       if (Ty->isAggregateType() || Ty->isVectorTy() || Ty->isIntegerTy(128)) {
         // declare .param .align <align> .b8 .param<n>[<size>];
         SDValue DeclareParamOps[] = {
-            Chain, DAG.getConstant(ArgAlign, dl, MVT::i32),
+            Chain, DAG.getConstant(ArgAlign.value(), dl, MVT::i32),
             DAG.getConstant(paramCount, dl, MVT::i32),
             DAG.getConstant(AllocSize, dl, MVT::i32), InFlag};
         Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
@@ -1539,8 +1537,9 @@
           // Adjust type of the store op if we've extended the scalar
           // return value.
           EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : VTs[j];
-          unsigned EltAlign =
-              NeedAlign ? GreatestCommonDivisor64(ArgAlign, Offsets[j]) : 0;
+          MaybeAlign EltAlign;
+          if (NeedAlign)
+            EltAlign = commonAlignment(ArgAlign, Offsets[j]);

           Chain = DAG.getMemIntrinsicNode(
               Op, dl, DAG.getVTList(MVT::Other, MVT::Glue), StoreOperands,
@@ -1604,10 +1603,9 @@
         DAG.getConstant(paramCount, dl, MVT::i32),
         DAG.getConstant(curOffset, dl, MVT::i32),
         theVal, InFlag };
-      Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
-                                      CopyParamOps, elemtype,
-                                      MachinePointerInfo(), /* Align */ 0,
-                                      MachineMemOperand::MOStore);
+      Chain = DAG.getMemIntrinsicNode(
+          NVPTXISD::StoreParam, dl, CopyParamVTs, CopyParamOps, elemtype,
+          MachinePointerInfo(), /* Align */ None, MachineMemOperand::MOStore);

       InFlag = Chain.getValue(1);
     }
@@ -1615,7 +1613,7 @@
   }

   GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
-  unsigned retAlignment = 0;
+  MaybeAlign retAlignment = None;

   // Handle Result
   if (Ins.size() > 0) {
@@ -1644,11 +1642,12 @@
       InFlag = Chain.getValue(1);
     } else {
       retAlignment = getArgumentAlignment(Callee, CS, RetTy, 0, DL);
+      assert(retAlignment && "retAlignment is guaranteed to be set");
       SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
-      SDValue DeclareRetOps[] = { Chain,
-                                  DAG.getConstant(retAlignment, dl, MVT::i32),
-                                  DAG.getConstant(resultsz / 8, dl, MVT::i32),
-                                  DAG.getConstant(0, dl, MVT::i32), InFlag };
+      SDValue DeclareRetOps[] = {
+          Chain, DAG.getConstant(retAlignment->value(), dl, MVT::i32),
+          DAG.getConstant(resultsz / 8, dl, MVT::i32),
+          DAG.getConstant(0, dl, MVT::i32), InFlag};
       Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
                           DeclareRetOps);
       InFlag = Chain.getValue(1);
@@ -1754,7 +1753,7 @@
     ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets, 0);
     assert(VTs.size() == Ins.size() && "Bad value decomposition");

-    unsigned RetAlign = getArgumentAlignment(Callee, CS, RetTy, 0, DL);
+    Align RetAlign = getArgumentAlignment(Callee, CS, RetTy, 0, DL);
     auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, RetAlign);

     SmallVector<EVT, 6> LoadVTs;
@@ -1770,7 +1769,7 @@
       bool needTruncate = false;
      EVT TheLoadType = VTs[i];
       EVT EltType = Ins[i].VT;
-      unsigned EltAlign = GreatestCommonDivisor64(RetAlign, Offsets[i]);
+      Align EltAlign = commonAlignment(RetAlign, Offsets[i]);
       if (ExtendIntegerRetVal) {
         TheLoadType = MVT::i32;
         EltType = MVT::i32;
@@ -2545,7 +2544,7 @@
     ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets, 0);
     assert(VTs.size() > 0 && "Unexpected empty type.");
     auto VectorInfo =
-        VectorizePTXValueVTs(VTs, Offsets, DL.getABITypeAlignment(Ty));
+        VectorizePTXValueVTs(VTs, Offsets, DL.getABITypeAlign(Ty));

     SDValue Arg = getParamSymbol(DAG, idx, PtrVT);
     int VecIdx = -1; // Index of the first element of the current vector.
@@ -2664,7 +2663,7 @@
   assert(VTs.size() == OutVals.size() && "Bad return value decomposition");

   auto VectorInfo = VectorizePTXValueVTs(
-      VTs, Offsets, RetTy->isSized() ? DL.getABITypeAlignment(RetTy) : 1);
+      VTs, Offsets, RetTy->isSized() ? DL.getABITypeAlign(RetTy) : Align(1));

   // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
   // 32-bits are sign extended or zero extended, depending on whether
@@ -2716,10 +2715,9 @@
       // Adjust type of load/store op if we've extended the scalar
       // return value.
       EVT TheStoreType = ExtendIntegerRetVal ? MVT::i32 : VTs[i];
-      Chain = DAG.getMemIntrinsicNode(Op, dl, DAG.getVTList(MVT::Other),
-                                      StoreOperands, TheStoreType,
-                                      MachinePointerInfo(), /* Align */ 1,
-                                      MachineMemOperand::MOStore);
+      Chain = DAG.getMemIntrinsicNode(
+          Op, dl, DAG.getVTList(MVT::Other), StoreOperands, TheStoreType,
+          MachinePointerInfo(), Align(1), MachineMemOperand::MOStore);
       // Cleanup vector state.
       StoreOperands.clear();
     }
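In the NVPTX hunks above, `commonAlignment` is the `Align`-typed replacement for `GreatestCommonDivisor64`; since a parameter alignment is always a power of two, the two computations agree on every offset. A sketch (values illustrative):

```cpp
#include "llvm/Support/Alignment.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>

using llvm::Align;
using llvm::commonAlignment;
using llvm::GreatestCommonDivisor64;

int main() {
  Align ArgAlign(8);
  // An element at offset 4 of an 8-byte-aligned parameter is only 4-aligned.
  assert(GreatestCommonDivisor64(ArgAlign.value(), 4) == 4); // old computation
  assert(commonAlignment(ArgAlign, 4) == Align(4));          // new computation
  // Offset 0 keeps the full parameter alignment.
  assert(commonAlignment(ArgAlign, 0) == Align(8));
  return 0;
}
```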
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2750,7 +2750,7 @@
   SDValue Ops[] = { GA, Reg };
   return DAG.getMemIntrinsicNode(
       PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
-      MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0,
+      MachinePointerInfo::getGOT(DAG.getMachineFunction()), None,
       MachineMemOperand::MOLoad);
 }
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -1185,7 +1185,7 @@
       SDVTList VTs = CurDAG->getVTList(MVT::Other);
       SDValue Ops[] = {N->getOperand(0), N->getOperand(1), MemTmp};
       Store = CurDAG->getMemIntrinsicNode(X86ISD::FST, dl, VTs, Ops, MemVT,
-                                          MPI, /*Align*/ 0,
+                                          MPI, /*Align*/ None,
                                           MachineMemOperand::MOStore);
       if (N->getFlags().hasNoFPExcept()) {
         SDNodeFlags Flags = Store->getFlags();
@@ -1201,9 +1201,9 @@
       if (!DstIsSSE) {
         SDVTList VTs = CurDAG->getVTList(DstVT, MVT::Other);
         SDValue Ops[] = {Store, MemTmp};
-        Result =
-            CurDAG->getMemIntrinsicNode(X86ISD::FLD, dl, VTs, Ops, MemVT, MPI,
-                                        /*Align*/ 0, MachineMemOperand::MOLoad);
+        Result = CurDAG->getMemIntrinsicNode(
+            X86ISD::FLD, dl, VTs, Ops, MemVT, MPI,
+            /*Align*/ None, MachineMemOperand::MOLoad);
         if (N->getFlags().hasNoFPExcept()) {
           SDNodeFlags Flags = Result->getFlags();
           Flags.setNoFPExcept(true);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1215,7 +1215,7 @@
     std::pair<SDValue, SDValue> BuildFILD(EVT DstVT, EVT SrcVT, const SDLoc &DL,
                                           SDValue Chain, SDValue Pointer,
                                           MachinePointerInfo PtrInfo,
-                                          unsigned Align,
+                                          Align Alignment,
                                           SelectionDAG &DAG) const;

     bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
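The `BuildFILD` declaration change above is the usual narrowing from `unsigned` to `llvm::Align`: validity is now enforced at the caller's constructor call, so a zero or non-power-of-two value can never reach the callee. A hypothetical stand-in (`useAlignment` is not LLVM API):

```cpp
#include "llvm/Support/Alignment.h"
#include <cstdint>

using llvm::Align;

static uint64_t useAlignment(Align Alignment) { // was: unsigned Align
  return Alignment.value(); // nonzero power of two by construction
}

int main() {
  return useAlignment(Align(8)) == 8 ? 0 : 1; // callers now write Align(8), not 8
}
```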
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -8419,11 +8419,9 @@
     if (TLI.isTypeLegal(VecVT)) {
       SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
       SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
-      SDValue ResNode =
-          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT,
-                                  LDBase->getPointerInfo(),
-                                  LDBase->getAlignment(),
-                                  MachineMemOperand::MOLoad);
+      SDValue ResNode = DAG.getMemIntrinsicNode(
+          X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT, LDBase->getPointerInfo(),
+          LDBase->getAlign(), MachineMemOperand::MOLoad);
       for (auto *LD : Loads)
         if (LD)
           DAG.makeEquivalentMemoryOrdering(LD, ResNode);
@@ -8665,7 +8663,7 @@
         SDValue CP = DAG.getConstantPool(C, PVT);
         unsigned Repeat = VT.getSizeInBits() / SplatBitSize;

-        unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
+        MaybeAlign Alignment(cast<ConstantPoolSDNode>(CP)->getAlignment());
         SDVTList Tys = DAG.getVTList(MVT::getVectorVT(CVT, Repeat), MVT::Other);
         SDValue Ops[] = {DAG.getEntryNode(), CP};
@@ -8753,7 +8751,7 @@
       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
       SDValue CP =
           DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
-      unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
+      MaybeAlign Alignment(cast<ConstantPoolSDNode>(CP)->getAlignment());
       SDVTList Tys = DAG.getVTList(VT, MVT::Other);
       SDValue Ops[] = {DAG.getEntryNode(), CP};
@@ -19248,15 +19246,16 @@
     ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);

   unsigned Size = SrcVT.getStoreSize();
+  Align Alignment(Size);
   MachineFunction &MF = DAG.getMachineFunction();
   auto PtrVT = getPointerTy(MF.getDataLayout());
-  int SSFI = MF.getFrameInfo().CreateStackObject(Size, Size, false);
+  int SSFI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false);
   MachinePointerInfo MPI =
       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
-  Chain = DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, Size);
+  Chain = DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, Alignment);
   std::pair<SDValue, SDValue> Tmp =
-      BuildFILD(VT, SrcVT, dl, Chain, StackSlot, MPI, Size, DAG);
+      BuildFILD(VT, SrcVT, dl, Chain, StackSlot, MPI, Alignment, DAG);

   if (IsStrict)
     return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
@@ -19266,7 +19265,7 @@

 std::pair<SDValue, SDValue> X86TargetLowering::BuildFILD(
     EVT DstVT, EVT SrcVT, const SDLoc &DL, SDValue Chain, SDValue Pointer,
-    MachinePointerInfo PtrInfo, unsigned Alignment, SelectionDAG &DAG) const {
+    MachinePointerInfo PtrInfo, Align Alignment, SelectionDAG &DAG) const {
   // Build the FILD
   SDVTList Tys;
   bool useSSE = isScalarFPTypeInSSEReg(DstVT);
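In the constant-pool hunks above, `ConstantPoolSDNode::getAlignment()` still returns an `unsigned` that may be 0, so the value is wrapped in `MaybeAlign` at the boundary rather than constructing an `Align` directly. Sketch (values illustrative):

```cpp
#include "llvm/Support/Alignment.h"
#include <cassert>

using llvm::MaybeAlign;

int main() {
  unsigned Legacy = 0;          // e.g. getAlignment() with nothing recorded
  MaybeAlign Alignment(Legacy); // becomes "unset", not an invalid Align(0)
  assert(!Alignment);

  Alignment = MaybeAlign(16u);  // nonzero values carry through unchanged
  assert(Alignment && Alignment->value() == 16);
  return 0;
}
```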
@@ -19559,8 +19558,8 @@
   SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
   SDValue VBias = DAG.getMemIntrinsicNode(
       X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::f64,
-      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
-      /*Alignment*/ 8, MachineMemOperand::MOLoad);
+      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(8),
+      MachineMemOperand::MOLoad);

   SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v4i64, ZExtIn,
                            DAG.getBitcast(MVT::v4i64, VBias));
@@ -19739,7 +19738,7 @@
   SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
                                 OffsetSlot, MPI.getWithOffset(4), 4);
   std::pair<SDValue, SDValue> Tmp =
-      BuildFILD(DstVT, MVT::i64, dl, Store2, StackSlot, MPI, 8, DAG);
+      BuildFILD(DstVT, MVT::i64, dl, Store2, StackSlot, MPI, Align(8), DAG);
   if (IsStrict)
     return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
@@ -19755,7 +19754,7 @@
     ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
   }
   SDValue Store =
-      DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, 8 /*Align*/);
+      DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, Align(8));
   // For i64 source, we need to add the appropriate power of 2 if the input
   // was negative. This is the same as the optimization in
   // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
@@ -19763,9 +19762,9 @@
   // in SSE. (The generic code can't know it's OK to do this, or how to.)
   SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
   SDValue Ops[] = { Store, StackSlot };
-  SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
-                                         MVT::i64, MPI, 8 /*Align*/,
-                                         MachineMemOperand::MOLoad);
+  SDValue Fild =
+      DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, MVT::i64, MPI,
+                              Align(8), MachineMemOperand::MOLoad);
   Chain = Fild.getValue(1);

@@ -20716,14 +20715,13 @@
     SDValue Ops[] = { Chain, StackPtr };

     Src = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, SrcVT, MPI,
-                                  /*Align*/0, MachineMemOperand::MOLoad);
+                                  /*Align*/ None, MachineMemOperand::MOLoad);
     Chain = Src.getValue(1);
   }

   SDValue StoreOps[] = { Chain, Src, StackPtr };
-  Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, DL,
-                                  DAG.getVTList(MVT::Other), StoreOps,
-                                  DstVT, MPI, /*Align*/0,
+  Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, DL, DAG.getVTList(MVT::Other),
+                                  StoreOps, DstVT, MPI, /*Align*/ None,
                                   MachineMemOperand::MOStore);

   return DAG.getLoad(DstVT, DL, Chain, StackPtr, MPI);
@@ -23623,11 +23621,8 @@
                      DAG.getConstant(Align, dl, MVT::i32)};
   SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
   SDValue VAARG = DAG.getMemIntrinsicNode(
-      X86ISD::VAARG_64, dl,
-      VTs, InstOps, MVT::i64,
-      MachinePointerInfo(SV),
-      /*Align=*/0,
-      MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
+      X86ISD::VAARG_64, dl, VTs, InstOps, MVT::i64, MachinePointerInfo(SV),
+      /*Align=*/None, MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
   Chain = VAARG.getValue(1);

   // Load the next argument and return it
@@ -25809,10 +25804,10 @@
   SDValue Ops[] = {Chain, StackSlot};
   Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
                                   DAG.getVTList(MVT::Other), Ops, MVT::i16, MPI,
-                                  2 /*Align*/, MachineMemOperand::MOStore);
+                                  Align(2), MachineMemOperand::MOStore);

   // Load FP Control Word from stack slot
-  SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MPI, 2 /*Align*/);
+  SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MPI, Align(2));
   Chain = CWD.getValue(1);

   // Mask and turn the control bits into a shift for the lookup table.
@@ -28487,7 +28482,7 @@
   SDValue LdOps[] = {Chain, StackPtr};
   SDValue Value = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, LdOps,
                                           MVT::i64, MPI,
-                                          /*Align*/ 0, MachineMemOperand::MOLoad);
+                                          /*Align*/ None, MachineMemOperand::MOLoad);
   Chain = Value.getValue(1);

   // Now use an FIST to do the atomic store.
@@ -29898,10 +29893,9 @@
   MachinePointerInfo MPI =
       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
   SDValue StoreOps[] = { Chain, Result, StackPtr };
-  Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, dl,
-                                  DAG.getVTList(MVT::Other), StoreOps,
-                                  MVT::i64, MPI, 0 /*Align*/,
-                                  MachineMemOperand::MOStore);
+  Chain = DAG.getMemIntrinsicNode(
+      X86ISD::FIST, dl, DAG.getVTList(MVT::Other), StoreOps, MVT::i64,
+      MPI, None /*Align*/, MachineMemOperand::MOStore);

   // Finally load the value back from the stack temporary and return it.
   // This load is not atomic and doesn't need to be.
@@ -35321,11 +35315,9 @@
       if (LN->isSimple()) {
         SDVTList Tys = DAG.getVTList(MVT::v2f64, MVT::Other);
         SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
-        SDValue VZLoad =
-            DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::f64,
-                                    LN->getPointerInfo(),
-                                    LN->getAlignment(),
-                                    LN->getMemOperand()->getFlags());
+        SDValue VZLoad = DAG.getMemIntrinsicNode(
+            X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::f64, LN->getPointerInfo(),
+            LN->getAlign(), LN->getMemOperand()->getFlags());
         SDValue Movddup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, VZLoad);
         DCI.CombineTo(N.getNode(), Movddup);
         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
@@ -35423,11 +35415,10 @@
       if (LN->isSimple()) {
         SDVTList Tys = DAG.getVTList(VT, MVT::Other);
         SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
-        SDValue BcastLd =
-            DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
-                                    MVT::i16, LN->getPointerInfo(),
-                                    LN->getAlignment(),
-                                    LN->getMemOperand()->getFlags());
+        SDValue BcastLd = DAG.getMemIntrinsicNode(
+            X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
+            LN->getPointerInfo(), LN->getAlign(),
+            LN->getMemOperand()->getFlags());
         DCI.CombineTo(N.getNode(), BcastLd);
         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
         DCI.recursivelyDeleteUnusedNodes(LN);
@@ -35468,12 +35459,11 @@
         SDVTList Tys = DAG.getVTList(VT, MVT::Other);
         SDValue Ptr = DAG.getMemBasePlusOffset(LN->getBasePtr(), Offset, DL);
         SDValue Ops[] = { LN->getChain(), Ptr };
-        SDValue BcastLd =
-            DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
-                                    MVT::i16,
-                                    LN->getPointerInfo().getWithOffset(Offset),
-                                    MinAlign(LN->getAlignment(), Offset),
-                                    LN->getMemOperand()->getFlags());
+        SDValue BcastLd = DAG.getMemIntrinsicNode(
+            X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
+            LN->getPointerInfo().getWithOffset(Offset),
+            commonAlignment(LN->getAlign(), Offset),
+            LN->getMemOperand()->getFlags());
         DCI.CombineTo(N.getNode(), BcastLd);
         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
         DCI.recursivelyDeleteUnusedNodes(LN);
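In the offset-broadcast hunk above, `commonAlignment(LN->getAlign(), Offset)` is the `Align`-typed spelling of the old `MinAlign(LN->getAlignment(), Offset)`: both yield the largest power of two dividing both arguments. Sketch (values illustrative):

```cpp
#include "llvm/Support/Alignment.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>

using llvm::Align;
using llvm::commonAlignment;
using llvm::MinAlign;

int main() {
  Align LoadAlign(16);
  uint64_t Offset = 10; // broadcast element 10 bytes into the load
  assert(MinAlign(LoadAlign.value(), Offset) == 2);       // old computation
  assert(commonAlignment(LoadAlign, Offset) == Align(2)); // new computation
  return 0;
}
```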
@@ -35506,11 +35496,10 @@
       if (LN->isSimple()) {
         SDVTList Tys = DAG.getVTList(VT, MVT::Other);
         SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
-        SDValue BcastLd =
-            DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
-                                    MVT::f64, LN->getPointerInfo(),
-                                    LN->getAlignment(),
-                                    LN->getMemOperand()->getFlags());
+        SDValue BcastLd = DAG.getMemIntrinsicNode(
+            X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::f64,
+            LN->getPointerInfo(), LN->getAlign(),
+            LN->getMemOperand()->getFlags());
         DCI.CombineTo(N.getNode(), BcastLd);
         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
         DCI.recursivelyDeleteUnusedNodes(LN);
@@ -36271,12 +36260,10 @@
     if (LN->isSimple()) {
       SDVTList Tys = DAG.getVTList(VT, MVT::Other);
       SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
-      SDValue VZLoad =
-          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
-                                  VT.getVectorElementType(),
-                                  LN->getPointerInfo(),
-                                  LN->getAlignment(),
-                                  LN->getMemOperand()->getFlags());
+      SDValue VZLoad = DAG.getMemIntrinsicNode(
+          X86ISD::VZEXT_LOAD, dl, Tys, Ops, VT.getVectorElementType(),
+          LN->getPointerInfo(), LN->getAlign(),
+          LN->getMemOperand()->getFlags());
       DCI.CombineTo(N, VZLoad);
       DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
       DCI.recursivelyDeleteUnusedNodes(LN);
@@ -44207,11 +44194,9 @@
     MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
     SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
     SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
-    SDValue VZLoad =
-        DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT,
-                                LN->getPointerInfo(),
-                                LN->getAlignment(),
-                                LN->getMemOperand()->getFlags());
+    SDValue VZLoad = DAG.getMemIntrinsicNode(
+        X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT, LN->getPointerInfo(),
+        LN->getAlign(), LN->getMemOperand()->getFlags());
     SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
                                   DAG.getBitcast(InVT, VZLoad));
     DCI.CombineTo(N, Convert);
@@ -44243,11 +44228,9 @@
     MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
     SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
     SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
-    SDValue VZLoad =
-        DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT,
-                                LN->getPointerInfo(),
-                                LN->getAlignment(),
-                                LN->getMemOperand()->getFlags());
+    SDValue VZLoad = DAG.getMemIntrinsicNode(
+        X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT, LN->getPointerInfo(),
+        LN->getAlign(), LN->getMemOperand()->getFlags());
     SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
                                   DAG.getBitcast(InVT, VZLoad));
     DCI.CombineTo(N, Convert);
@@ -44332,11 +44315,9 @@
     SDLoc dl(N);
     SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
     SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
-    SDValue VZLoad =
-        DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, MVT::i64,
-                                LN->getPointerInfo(),
-                                LN->getAlignment(),
-                                LN->getMemOperand()->getFlags());
+    SDValue VZLoad = DAG.getMemIntrinsicNode(
+        X86ISD::VZEXT_LOAD, dl, Tys, Ops, MVT::i64, LN->getPointerInfo(),
+        LN->getAlign(), LN->getMemOperand()->getFlags());
     SDValue Convert = DAG.getNode(N->getOpcode(), dl, MVT::v4f32,
                                   DAG.getBitcast(MVT::v8i16, VZLoad));
     DCI.CombineTo(N, Convert);
@@ -45580,7 +45561,7 @@
     std::pair<SDValue, SDValue> Tmp =
         Subtarget.getTargetLowering()->BuildFILD(
             VT, InVT, SDLoc(N), Ld->getChain(), Ld->getBasePtr(),
-            Ld->getPointerInfo(), Ld->getAlignment(), DAG);
+            Ld->getPointerInfo(), Ld->getAlign(), DAG);
     DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Tmp.second);
     return Tmp.first;
   }