diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -1210,12 +1210,14 @@
 .. _attr_align:
 
 ``align <n>`` or ``align(<n>)``
-    This indicates that the pointer value has the specified alignment.
-    If the pointer value does not have the specified alignment,
-    :ref:`poison value <poisonvalues>` is returned or passed instead. The
-    ``align`` attribute should be combined with the ``noundef`` attribute to
-    ensure a pointer is aligned, or otherwise the behavior is undefined. Note
-    that ``align 1`` has no effect on non-byval, non-preallocated arguments.
+    This indicates that the pointer value or vector of pointers has the
+    specified alignment. If applied to a vector of pointers, *all* pointers
+    (elements) have the specified alignment. If the pointer value does not have
+    the specified alignment, :ref:`poison value <poisonvalues>` is returned or
+    passed instead. The ``align`` attribute should be combined with the
+    ``noundef`` attribute to ensure a pointer is aligned, or otherwise the
+    behavior is undefined. Note that ``align 1`` has no effect on non-byval,
+    non-preallocated arguments.
 
     Note that this attribute has additional semantics when combined with the
     ``byval`` or ``preallocated`` attribute, which are documented there.
@@ -19755,26 +19757,28 @@ operation. The return type and underlying type of the vector of pointers are
 the same vector types.
 
+The :ref:`align <attr_align>` parameter attribute can be provided for the first
+argument.
+
 Semantics:
 """"""""""
 
 The '``llvm.vp.gather``' intrinsic reads multiple scalar values from memory in
 the same way as the '``llvm.masked.gather``' intrinsic, where the mask is taken
 from the combination of the '``mask``' and '``evl``' operands in the usual VP
-way. Of the '``llvm.masked.gather``' operands not set by '``llvm.vp.gather``':
-the '``passthru``' operand is implicitly ``undef``; the '``alignment``' operand
-is taken as the ABI alignment of the source addresses as specified by the
-:ref:`datalayout string<langref_datalayout>`.
+way. Certain '``llvm.masked.gather``' operands do not have corresponding
+operands in '``llvm.vp.gather``': the '``passthru``' operand is implicitly
+``undef``; the '``alignment``' operand is taken as the ``align`` parameter, if
+provided. The default alignment is taken as the ABI alignment of the source
+addresses as specified by the :ref:`datalayout string<langref_datalayout>`.
 
 Examples:
 """""""""
 
 .. code-block:: text
 
-     %r = call void @llvm.vp.scatter.v8i8.v8p0i8(<8 x i8> %val, <8 x i8*> %ptrs, <8 x i1> %mask, i32 %evl)
+     %r = call <8 x i8> @llvm.vp.gather.v8i8.v8p0i8(<8 x i8*> align 8 %ptrs, <8 x i1> %mask, i32 %evl)
      ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
-     ;; Note that since the alignment is ultimately up to the data layout
-     ;; string, 8 is used as an example.
 
-     %also.r = call void @llvm.masked.scatter.v8i8.v8p0i8(<8 x i8> %val, <8 x i8*> %ptrs, i32 8, <8 x i1> %mask, <8 x i8> undef)
+     %also.r = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, i32 8, <8 x i1> %mask, <8 x i8> undef)
@@ -19810,15 +19814,20 @@ elements as the return type. The fourth is the explicit vector length of the
 operation.
 
+The :ref:`align <attr_align>` parameter attribute can be provided for the
+second argument.
+
 Semantics:
 """"""""""
 
 The '``llvm.vp.scatter``' intrinsic writes multiple scalar values to memory in
 the same way as the '``llvm.masked.scatter``' intrinsic, where the mask is
 taken from the combination of the '``mask``' and '``evl``' operands in the
-usual VP way. The '``alignment``' operand of the '``llvm.masked.scatter``'
-intrinsic is not set by '``llvm.vp.scatter``': it is taken as the ABI alignment
-of the destination addresses as specified by the :ref:`datalayout
+usual VP way. The '``alignment``' operand of the '``llvm.masked.scatter``' does
+not have a corresponding operand in '``llvm.vp.scatter``': it is instead
+provided via the optional ``align`` parameter attribute on the
+vector-of-pointers operand. Otherwise it is taken as the ABI alignment of the
+destination addresses as specified by the :ref:`datalayout
 string<langref_datalayout>`.
 
 Examples:
 """""""""
@@ -19826,12 +19835,10 @@
 .. code-block:: text
 
-     call void @llvm.vp.scatter.v8i8.v8p0i8(<8 x i8> %val, <8 x i8*> %ptrs, <8 x i1> %mask, i32 %evl)
+     call void @llvm.vp.scatter.v8i8.v8p0i8(<8 x i8> %val, <8 x i8*> align 1 %ptrs, <8 x i1> %mask, i32 %evl)
      ;; For all lanes below %evl, the call above is lane-wise equivalent to the call below.
 
-     ;; Note that since the alignment is ultimately up to the data layout
-     ;; string, 8 is used as an example.
-     call void @llvm.masked.scatter.v8i8.v8p0i8(<8 x i8> %val, <8 x i8*> %ptrs, i32 8, <8 x i1> %mask)
+     call void @llvm.masked.scatter.v8i8.v8p0i8(<8 x i8> %val, <8 x i8*> %ptrs, i32 1, <8 x i1> %mask)
 
 .. _int_mload_mstore:
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -7341,8 +7341,6 @@
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   Value *PtrOperand = VPIntrin.getArgOperand(0);
   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
-  if (!Alignment)
-    Alignment = DAG.getEVTAlign(VT);
   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
   const MDNode *Ranges = VPIntrin.getMetadata(LLVMContext::MD_range);
   SDValue LD;
@@ -7350,6 +7348,8 @@
   if (!IsGather) {
     // Do not serialize variable-length loads of constant memory with
     // anything.
+    if (!Alignment)
+      Alignment = DAG.getEVTAlign(VT);
     MemoryLocation ML;
     if (VT.isScalableVector())
       ML = MemoryLocation::getAfter(PtrOperand);
@@ -7367,6 +7367,8 @@
     LD = DAG.getLoadVP(VT, DL, InChain, OpValues[0], OpValues[1], OpValues[2],
                        MMO, false /*IsExpanding */);
   } else {
+    if (!Alignment)
+      Alignment = DAG.getEVTAlign(VT.getScalarType());
     unsigned AS =
         PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
     MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
@@ -7407,11 +7409,11 @@
   Value *PtrOperand = VPIntrin.getArgOperand(1);
   EVT VT = OpValues[0].getValueType();
   MaybeAlign Alignment = VPIntrin.getPointerAlignment();
-  if (!Alignment)
-    Alignment = DAG.getEVTAlign(VT);
   AAMDNodes AAInfo = VPIntrin.getAAMetadata();
   SDValue ST;
   if (!IsScatter) {
+    if (!Alignment)
+      Alignment = DAG.getEVTAlign(VT);
     MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
         MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
         VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo);
@@ -7419,6 +7421,8 @@
     ST = DAG.getStoreVP(getMemoryRoot(), DL, OpValues[0], OpValues[1],
                         OpValues[2], OpValues[3], MMO, false /* IsTruncating */);
   } else {
+    if (!Alignment)
+      Alignment = DAG.getEVTAlign(VT.getScalarType());
     unsigned AS =
         PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
     MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
diff --git a/llvm/lib/IR/Attributes.cpp b/llvm/lib/IR/Attributes.cpp
--- a/llvm/lib/IR/Attributes.cpp
+++ b/llvm/lib/IR/Attributes.cpp
@@ -1812,12 +1812,12 @@
   AttrBuilder Incompatible;
 
   if (!Ty->isIntegerTy())
-    // Attribute that only apply to integers.
+    // Attributes that only apply to integers.
     Incompatible.addAttribute(Attribute::SExt)
         .addAttribute(Attribute::ZExt);
 
   if (!Ty->isPointerTy())
-    // Attribute that only apply to pointers.
+    // Attributes that only apply to pointers.
     Incompatible.addAttribute(Attribute::Nest)
         .addAttribute(Attribute::NoAlias)
         .addAttribute(Attribute::NoCapture)
@@ -1825,7 +1825,6 @@
         .addAttribute(Attribute::ReadNone)
         .addAttribute(Attribute::ReadOnly)
         .addAttribute(Attribute::SwiftError)
-        .addAlignmentAttr(1)             // the int here is ignored
         .addDereferenceableAttr(1)       // the int here is ignored
         .addDereferenceableOrNullAttr(1) // the int here is ignored
         .addPreallocatedAttr(Ty)
@@ -1835,6 +1834,10 @@
         .addByRefAttr(Ty)
         .addTypeAttr(Attribute::ElementType, Ty);
 
+  if (!Ty->isPtrOrPtrVectorTy())
+    // Attributes that only apply to pointers or vectors of pointers.
+    Incompatible.addAlignmentAttr(1); // the int here is ignored
+
   // Some attributes can apply to all "values" but there are no `void` values.
   if (Ty->isVoidTy())
     Incompatible.addAttribute(Attribute::NoUndef);