diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -737,8 +737,14 @@
 :ref:`Scalable vectors <t_vector>` cannot be global variables or members of
 arrays because their size is unknown at compile time. They are allowed in
-structs to facilitate intrinsics returning multiple values. Structs containing
-scalable vectors cannot be used in loads, stores, allocas, or GEPs.
+structs to facilitate intrinsics returning multiple values. Generally, structs
+containing scalable vectors are not considered "sized" and cannot be used in
+loads, stores, allocas, or GEPs. The only exception to this rule is for structs
+that contain scalable vectors of the same type. For example,
+``{<vscale x 2 x i32>, <vscale x 2 x i32>}`` contains the same type, while
+``{<vscale x 2 x i32>, <vscale x 2 x i64>}`` does not. Such structs (we may
+call them homogeneous scalable vector structs) are considered sized and can be
+used in loads, stores, and allocas, but not in GEPs.

 Syntax::

@@ -10247,6 +10253,11 @@

 '``type``' may be any sized type.

+Structs containing scalable vectors cannot be used in allocas unless all
+fields are the same scalable vector type (e.g.
+``{<vscale x 2 x i32>, <vscale x 2 x i32>}`` contains the same type, while
+``{<vscale x 2 x i32>, <vscale x 2 x i64>}`` does not).
+
 Semantics:
 """"""""""

diff --git a/llvm/include/llvm/CodeGen/Analysis.h b/llvm/include/llvm/CodeGen/Analysis.h
--- a/llvm/include/llvm/CodeGen/Analysis.h
+++ b/llvm/include/llvm/CodeGen/Analysis.h
@@ -64,15 +64,33 @@
 ///
 void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                      SmallVectorImpl<EVT> &ValueVTs,
-                     SmallVectorImpl<uint64_t> *Offsets = nullptr,
+                     SmallVectorImpl<TypeSize> *Offsets,
+                     TypeSize StartingOffset);
+void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
+                     SmallVectorImpl<EVT> &ValueVTs,
+                     SmallVectorImpl<TypeSize> *Offsets = nullptr,
                      uint64_t StartingOffset = 0);
+void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
+                     SmallVectorImpl<EVT> &ValueVTs,
+                     SmallVectorImpl<uint64_t> *FixedOffsets,
+                     uint64_t StartingOffset);

 /// Variant of ComputeValueVTs that also produces the memory VTs.
 void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
                      SmallVectorImpl<EVT> &ValueVTs,
                      SmallVectorImpl<EVT> *MemVTs,
-                     SmallVectorImpl<uint64_t> *Offsets = nullptr,
+                     SmallVectorImpl<TypeSize> *Offsets,
+                     TypeSize StartingOffset);
+void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
+                     SmallVectorImpl<EVT> &ValueVTs,
+                     SmallVectorImpl<EVT> *MemVTs,
+                     SmallVectorImpl<TypeSize> *Offsets = nullptr,
                      uint64_t StartingOffset = 0);
+void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
+                     SmallVectorImpl<EVT> &ValueVTs,
+                     SmallVectorImpl<EVT> *MemVTs,
+                     SmallVectorImpl<uint64_t> *FixedOffsets,
+                     uint64_t StartingOffset);

 /// computeValueLLTs - Given an LLVM IR type, compute a sequence of
 /// LLTs that represent all the individual underlying
diff --git a/llvm/include/llvm/IR/DataLayout.h b/llvm/include/llvm/IR/DataLayout.h
--- a/llvm/include/llvm/IR/DataLayout.h
+++ b/llvm/include/llvm/IR/DataLayout.h
@@ -615,16 +615,16 @@
 /// Used to lazily calculate structure layout information for a target machine,
 /// based on the DataLayout structure.
-class StructLayout final : public TrailingObjects<StructLayout, uint64_t> {
-  uint64_t StructSize;
+class StructLayout final : public TrailingObjects<StructLayout, TypeSize> {
+  TypeSize StructSize;
   Align StructAlignment;
   unsigned IsPadded : 1;
   unsigned NumElements : 31;

 public:
-  uint64_t getSizeInBytes() const { return StructSize; }
+  TypeSize getSizeInBytes() const { return StructSize; }

-  uint64_t getSizeInBits() const { return 8 * StructSize; }
+  TypeSize getSizeInBits() const { return 8 * StructSize; }

   Align getAlignment() const { return StructAlignment; }

@@ -634,23 +634,22 @@

   /// Given a valid byte offset into the structure, returns the structure
   /// index that contains it.
-  unsigned getElementContainingOffset(uint64_t Offset) const;
+  unsigned getElementContainingOffset(uint64_t FixedOffset) const;

-  MutableArrayRef<uint64_t> getMemberOffsets() {
-    return llvm::MutableArrayRef(getTrailingObjects<uint64_t>(),
-                                 NumElements);
+  MutableArrayRef<TypeSize> getMemberOffsets() {
+    return llvm::MutableArrayRef(getTrailingObjects<TypeSize>(), NumElements);
   }

-  ArrayRef<uint64_t> getMemberOffsets() const {
-    return llvm::ArrayRef(getTrailingObjects<uint64_t>(), NumElements);
+  ArrayRef<TypeSize> getMemberOffsets() const {
+    return llvm::ArrayRef(getTrailingObjects<TypeSize>(), NumElements);
   }

-  uint64_t getElementOffset(unsigned Idx) const {
+  TypeSize getElementOffset(unsigned Idx) const {
     assert(Idx < NumElements && "Invalid element idx!");
     return getMemberOffsets()[Idx];
   }

-  uint64_t getElementOffsetInBits(unsigned Idx) const {
+  TypeSize getElementOffsetInBits(unsigned Idx) const {
     return getElementOffset(Idx) * 8;
   }

@@ -659,7 +658,7 @@

   StructLayout(StructType *ST, const DataLayout &DL);

-  size_t numTrailingObjects(OverloadToken<uint64_t>) const {
+  size_t numTrailingObjects(OverloadToken<TypeSize>) const {
     return NumElements;
   }
 };
@@ -680,8 +679,7 @@
   }
   case Type::StructTyID:
     // Get the layout annotation... which is lazily created on demand.
-    return TypeSize::Fixed(
-        getStructLayout(cast<StructType>(Ty))->getSizeInBits());
+    return getStructLayout(cast<StructType>(Ty))->getSizeInBits();
   case Type::IntegerTyID:
     return TypeSize::Fixed(Ty->getIntegerBitWidth());
   case Type::HalfTyID:
diff --git a/llvm/include/llvm/IR/DerivedTypes.h b/llvm/include/llvm/IR/DerivedTypes.h
--- a/llvm/include/llvm/IR/DerivedTypes.h
+++ b/llvm/include/llvm/IR/DerivedTypes.h
@@ -286,6 +286,14 @@
   /// Returns true if this struct contains a scalable vector.
   bool containsScalableVectorType() const;

+  /// Returns true if this struct contains homogeneous scalable vector types.
+  /// Note that the definition of a homogeneous scalable vector type is not
+  /// recursive here, so the following struct will return false from this
+  /// function:
+  ///   {{<vscale x 2 x i32>, <vscale x 2 x i32>},
+  ///    {<vscale x 2 x i32>, <vscale x 2 x i32>}}
+  bool containsHomogeneousScalableVectorTypes() const;
+
   /// Return true if this is a named struct that has a non-empty name.
   bool hasName() const { return SymbolTableEntry != nullptr; }
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -1167,6 +1167,9 @@
         ResultElementType(getIndexedType(PointeeType, IdxList)) {
     assert(cast<PointerType>(getType()->getScalarType())
                ->isOpaqueOrPointeeTypeMatches(ResultElementType));
+    assert(!(SourceElementType->isStructTy() &&
+             cast<StructType>(SourceElementType)->containsScalableVectorType()) &&
+           "GEP does not support struct types with scalable vector");
     init(Ptr, IdxList, NameStr);
   }

@@ -1181,6 +1184,9 @@
         ResultElementType(getIndexedType(PointeeType, IdxList)) {
     assert(cast<PointerType>(getType()->getScalarType())
                ->isOpaqueOrPointeeTypeMatches(ResultElementType));
+    assert(!(SourceElementType->isStructTy() &&
+             cast<StructType>(SourceElementType)->containsScalableVectorType()) &&
+           "GEP does not support struct types with scalable vector");
     init(Ptr, IdxList, NameStr);
   }

diff --git a/llvm/include/llvm/IR/Type.h b/llvm/include/llvm/IR/Type.h
--- a/llvm/include/llvm/IR/Type.h
+++ b/llvm/include/llvm/IR/Type.h
@@ -211,9 +211,7 @@

   /// Return true if this is a scalable vector type or a target extension type
   /// with a scalable layout.
-  bool isScalableTy() const {
-    return getTypeID() == ScalableVectorTyID || isScalableTargetExtTy();
-  }
+  bool isScalableTy() const;

   /// Return true if this is a FP type or a vector of FP.
   bool isFPOrFPVectorTy() const { return getScalarType()->isFloatingPointTy(); }
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -3740,6 +3740,8 @@
   for (const SCEV *IndexExpr : IndexExprs) {
     // Compute the (potentially symbolic) offset in bytes for this index.
     if (StructType *STy = dyn_cast<StructType>(CurTy)) {
+      assert(!STy->containsScalableVectorType() &&
+             "Structure with scalable vector type should not use GEP");
       // For a struct, add the member offset.
       ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
       unsigned FieldNo = Index->getZExtValue();
diff --git a/llvm/lib/CodeGen/Analysis.cpp b/llvm/lib/CodeGen/Analysis.cpp
--- a/llvm/lib/CodeGen/Analysis.cpp
+++ b/llvm/lib/CodeGen/Analysis.cpp
@@ -79,8 +79,8 @@
 void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                            Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                            SmallVectorImpl<EVT> *MemVTs,
-                           SmallVectorImpl<uint64_t> *Offsets,
-                           uint64_t StartingOffset) {
+                           SmallVectorImpl<TypeSize> *Offsets,
+                           TypeSize StartingOffset) {
   // Given a struct type, recursively traverse the elements.
   if (StructType *STy = dyn_cast<StructType>(Ty)) {
     // If the Offsets aren't needed, don't query the struct layout. This allows
@@ -92,7 +92,8 @@
                                       EE = STy->element_end();
          EI != EE; ++EI) {
       // Don't compute the element offset if we didn't get a StructLayout above.
-      uint64_t EltOffset = SL ? SL->getElementOffset(EI - EB) : 0;
+      TypeSize EltOffset = SL ? SL->getElementOffset(EI - EB)
+                              : TypeSize::get(0, StartingOffset.isScalable());
       ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
                       StartingOffset + EltOffset);
     }
@@ -101,7 +102,7 @@
   // Given an array type, recursively traverse the elements.
   if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
     Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
+    TypeSize EltSize = DL.getTypeAllocSize(EltTy);
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
       ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                       StartingOffset + i * EltSize);
@@ -120,12 +121,62 @@

 void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                            Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
-                           SmallVectorImpl<uint64_t> *Offsets,
-                           uint64_t StartingOffset) {
+                           SmallVectorImpl<TypeSize> *Offsets,
+                           TypeSize StartingOffset) {
   return ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, Offsets,
                          StartingOffset);
 }

+void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
+                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
+                           SmallVectorImpl<TypeSize> *Offsets,
+                           uint64_t StartingOffset) {
+  TypeSize Offset = TypeSize::get(StartingOffset, Ty->isScalableTy());
+  return ComputeValueVTs(TLI, DL, Ty, ValueVTs, Offsets, Offset);
+}
+
+void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
+                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
+                           SmallVectorImpl<uint64_t> *FixedOffsets,
+                           uint64_t StartingOffset) {
+  TypeSize Offset = TypeSize::get(StartingOffset, Ty->isScalableTy());
+  SmallVector<TypeSize, 4> Offsets;
+  if (FixedOffsets)
+    ComputeValueVTs(TLI, DL, Ty, ValueVTs, &Offsets, Offset);
+  else
+    ComputeValueVTs(TLI, DL, Ty, ValueVTs, nullptr, Offset);
+
+  if (FixedOffsets)
+    for (TypeSize Offset : Offsets)
+      FixedOffsets->push_back(Offset.getKnownMinValue());
+}
+
+void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
+                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
+                           SmallVectorImpl<EVT> *MemVTs,
+                           SmallVectorImpl<TypeSize> *Offsets,
+                           uint64_t StartingOffset) {
+  TypeSize Offset = TypeSize::get(StartingOffset, Ty->isScalableTy());
+  return ComputeValueVTs(TLI, DL, Ty, ValueVTs, MemVTs, Offsets, Offset);
+}
+
+void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
+                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
+                           SmallVectorImpl<EVT> *MemVTs,
+                           SmallVectorImpl<uint64_t> *FixedOffsets,
+                           uint64_t StartingOffset) {
+  TypeSize Offset = TypeSize::get(StartingOffset, Ty->isScalableTy());
+  SmallVector<TypeSize, 4> Offsets;
+  if (FixedOffsets)
+    ComputeValueVTs(TLI, DL, Ty, ValueVTs, MemVTs, &Offsets, Offset);
+  else
+    ComputeValueVTs(TLI, DL, Ty, ValueVTs, MemVTs, nullptr, Offset);
+
+  if (FixedOffsets)
+    for (TypeSize Offset : Offsets)
+      FixedOffsets->push_back(Offset.getKnownMinValue());
+}
+
 void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
                             SmallVectorImpl<LLT> &ValueTys,
                             SmallVectorImpl<uint64_t> *Offsets,
diff --git a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -152,12 +152,15 @@
                                                           false, AI);
       }

-      // Scalable vectors may need a special StackID to distinguish
-      // them from other (fixed size) stack objects.
-      if (isa<ScalableVectorType>(Ty))
+      // Scalable vectors and structs that contain scalable vectors may
+      // need a special StackID to distinguish them from other (fixed size)
+      // stack objects.
+      if (isa<ScalableVectorType>(Ty) ||
+          (isa<StructType>(Ty) &&
+           cast<StructType>(Ty)->containsScalableVectorType())) {
         MF->getFrameInfo().setStackID(FrameIndex,
                                       TFI->getStackIDForScalableVectors());
-
+      }
       StaticAllocaMap[AI] = FrameIndex;

       // Update the catch handler information.
       if (Iter != CatchObjects.end()) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2008,7 +2008,7 @@
   SmallVector<EVT, 4> ValueVTs, MemVTs;
   SmallVector<uint64_t, 4> Offsets;
   ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
-                  &Offsets);
+                  &Offsets, 0);
   unsigned NumValues = ValueVTs.size();

   SmallVector<SDValue, 4> Chains(NumValues);
@@ -4139,7 +4139,7 @@
   Type *Ty = I.getType();
   SmallVector<EVT, 4> ValueVTs, MemVTs;
   SmallVector<uint64_t, 4> Offsets;
-  ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets);
+  ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets, 0);
   unsigned NumValues = ValueVTs.size();
   if (NumValues == 0)
     return;
@@ -4238,7 +4238,7 @@
   SmallVector<uint64_t, 4> Offsets;
   const Value *SrcV = I.getOperand(0);
   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
-                  SrcV->getType(), ValueVTs, &Offsets);
+                  SrcV->getType(), ValueVTs, &Offsets, 0);
   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
          "expect a single EVT for swifterror");

@@ -4274,7 +4274,7 @@
   SmallVector<EVT, 4> ValueVTs;
   SmallVector<uint64_t, 4> Offsets;
   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
-                  ValueVTs, &Offsets);
+                  ValueVTs, &Offsets, 0);
   assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
          "expect a single EVT for swifterror");

@@ -4311,7 +4311,7 @@
   SmallVector<EVT, 4> ValueVTs, MemVTs;
   SmallVector<uint64_t, 4> Offsets;
   ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
-                  SrcV->getType(), ValueVTs, &MemVTs, &Offsets);
+                  SrcV->getType(), ValueVTs, &MemVTs, &Offsets, 0);
   unsigned NumValues = ValueVTs.size();
   if (NumValues == 0)
     return;
@@ -9893,7 +9893,7 @@
   SmallVector<EVT, 4> RetTys;
   SmallVector<uint64_t, 4> Offsets;
   auto &DL = CLI.DAG.getDataLayout();
-  ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
+  ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets, 0);

   if (CLI.IsPostTypeLegalization) {
     // If we are lowering a libcall after legalization, split the return type.
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -45,9 +45,11 @@
 // Support for StructLayout
 //===----------------------------------------------------------------------===//

-StructLayout::StructLayout(StructType *ST, const DataLayout &DL) {
+StructLayout::StructLayout(StructType *ST, const DataLayout &DL)
+    : StructSize(TypeSize::Fixed(0)) {
   assert(!ST->isOpaque() && "Cannot get layout of opaque structs");
-  StructSize = 0;
+  if (ST->containsScalableVectorType())
+    StructSize = TypeSize::Scalable(0);
   IsPadded = false;
   NumElements = ST->getNumElements();

@@ -57,9 +59,16 @@
     const Align TyAlign = ST->isPacked() ? Align(1) : DL.getABITypeAlign(Ty);

     // Add padding if necessary to align the data element properly.
-    if (!isAligned(TyAlign, StructSize)) {
+    // Currently the only structures with a scalable size are homogeneous
+    // scalable vector structs. Their members all have the same data type, so
+    // no alignment issue can arise. The condition below assumes this and
+    // needs to be adjusted if the assumption changes (e.g. if we support
+    // structures with arbitrary scalable-size data types, or structures that
+    // contain both fixed-size and scalable-size data type members).
+    if (!StructSize.isScalable() && !isAligned(TyAlign, StructSize)) {
       IsPadded = true;
-      StructSize = alignTo(StructSize, TyAlign);
+      StructSize = TypeSize::Fixed(alignTo(StructSize, TyAlign));
     }

     // Keep track of maximum alignment constraint.
@@ -67,28 +76,39 @@
     getMemberOffsets()[i] = StructSize;
     // Consume space for this data item
-    StructSize += DL.getTypeAllocSize(Ty).getFixedValue();
+    StructSize += DL.getTypeAllocSize(Ty);
   }

   // Add padding to the end of the struct so that it could be put in an array
   // and all array elements would be aligned correctly.
-  if (!isAligned(StructAlignment, StructSize)) {
+  if (!StructSize.isScalable() && !isAligned(StructAlignment, StructSize)) {
     IsPadded = true;
-    StructSize = alignTo(StructSize, StructAlignment);
+    StructSize = TypeSize::Fixed(alignTo(StructSize, StructAlignment));
   }
 }

 /// getElementContainingOffset - Given a valid offset into the structure,
 /// return the structure index that contains it.
-unsigned StructLayout::getElementContainingOffset(uint64_t Offset) const {
-  ArrayRef<uint64_t> MemberOffsets = getMemberOffsets();
-  auto SI = llvm::upper_bound(MemberOffsets, Offset);
+unsigned StructLayout::getElementContainingOffset(uint64_t FixedOffset) const {
+  assert(!StructSize.isScalable() &&
+         "Object that this method acts on should not be a structure with "
+         "scalable type");
+  TypeSize Offset = TypeSize::Fixed(FixedOffset);
+  ArrayRef<TypeSize> MemberOffsets = getMemberOffsets();
+
+  const auto *SI =
+      std::upper_bound(MemberOffsets.begin(), MemberOffsets.end(), Offset,
+                       [](TypeSize LHS, TypeSize RHS) -> bool {
+                         return TypeSize::isKnownLT(LHS, RHS);
+                       });
   assert(SI != MemberOffsets.begin() && "Offset not in structure type!");
   --SI;
-  assert(*SI <= Offset && "upper_bound didn't work");
-  assert((SI == MemberOffsets.begin() || *(SI - 1) <= Offset) &&
-         (SI + 1 == MemberOffsets.end() || *(SI + 1) > Offset) &&
-         "Upper bound didn't work!");
+  assert(TypeSize::isKnownLE(*SI, Offset) && "upper_bound didn't work");
+  assert(
+      (SI == MemberOffsets.begin() || TypeSize::isKnownLE(*(SI - 1), Offset)) &&
+      (SI + 1 == MemberOffsets.end() ||
+       TypeSize::isKnownGT(*(SI + 1), Offset)) &&
+      "Upper bound didn't work!");

   // Multiple fields can have the same offset if any of them are zero sized.
   // For example, in { i32, [0 x i32], i32 }, searching for offset 4 will stop
@@ -706,7 +726,7 @@
   // Otherwise, create the struct layout.  Because it is variable length, we
   // malloc it, then use placement new.
   StructLayout *L = (StructLayout *)safe_malloc(
-      StructLayout::totalSizeToAlloc<uint64_t>(Ty->getNumElements()));
+      StructLayout::totalSizeToAlloc<TypeSize>(Ty->getNumElements()));

   // Set SL before calling StructLayout's ctor.  The ctor could cause other
   // entries to be added to TheMap, invalidating our reference.
diff --git a/llvm/lib/IR/Type.cpp b/llvm/lib/IR/Type.cpp
--- a/llvm/lib/IR/Type.cpp
+++ b/llvm/lib/IR/Type.cpp
@@ -63,6 +63,12 @@
   return false;
 }

+bool Type::isScalableTy() const {
+  if (const auto *STy = dyn_cast<StructType>(this))
+    return STy->containsScalableVectorType();
+  return getTypeID() == ScalableVectorTyID || isScalableTargetExtTy();
+}
+
 const fltSemantics &Type::getFltSemantics() const {
   switch (getTypeID()) {
   case HalfTyID: return APFloat::IEEEhalf();
@@ -462,6 +468,16 @@
   return false;
 }

+bool StructType::containsHomogeneousScalableVectorTypes() const {
+  Type *FirstTy = getNumElements() > 0 ? elements()[0] : nullptr;
+  if (!FirstTy || !isa<ScalableVectorType>(FirstTy))
+    return false;
+  for (Type *Ty : elements())
+    if (Ty != FirstTy)
+      return false;
+  return true;
+}
+
 void StructType::setBody(ArrayRef<Type*> Elements, bool isPacked) {
   assert(isOpaque() && "Struct body already set!");

@@ -581,9 +597,15 @@
   // Okay, our struct is sized if all of the elements are, but if one of the
   // elements is opaque, the struct isn't sized *yet*, but may become sized in
   // the future, so just bail out without caching.
+  // The ONLY special case inside a struct that is considered sized is when
+  // the elements are homogeneous scalable vector types.
+  if (containsHomogeneousScalableVectorTypes())
+    return true;
   for (Type *Ty : elements()) {
     // If the struct contains a scalable vector type, don't consider it sized.
-    // This prevents it from being used in loads/stores/allocas/GEPs.
+    // This prevents it from being used in loads/stores/allocas/GEPs. The ONLY
+    // special case right now is a structure of homogeneous scalable vector
+    // types and is handled by the if-statement before this for-loop.
     if (isa<ScalableVectorType>(Ty))
       return false;
     if (!Ty->isSized(Visited))
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -735,6 +735,10 @@
           PoisonValue::get(T), NewLoad, 0, Name));
     }

+    // Don't unpack for structs with scalable vectors.
+    if (ST->containsScalableVectorType())
+      return nullptr;
+
     // We don't want to break loads with padding here as we'd loose
     // the knowledge that padding exists for the rest of the pipeline.
     const DataLayout &DL = IC.getDataLayout();
@@ -1259,6 +1263,10 @@
       return true;
     }

+    // Don't unpack for structs with scalable vectors.
+    if (ST->containsScalableVectorType())
+      return false;
+
     // We don't want to break loads with padding here as we'd loose
     // the knowledge that padding exists for the rest of the pipeline.
     const DataLayout &DL = IC.getDataLayout();
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -3936,6 +3936,9 @@
   if (!STy)
     return nullptr;

+  if (STy->containsScalableVectorType())
+    return nullptr;
+
   const StructLayout *SL = DL.getStructLayout(STy);
   if (Offset >= SL->getSizeInBytes())
     return nullptr;
diff --git a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
--- a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -522,6 +522,9 @@
         // the struct fields.
         if (Ops.empty())
           break;
+        // Don't do this for structs with scalable vectors.
+        if (STy->containsScalableVectorType())
+          break;
         if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
           if (SE.getTypeSizeInBits(C->getType()) <= 64) {
             const StructLayout &SL = *DL.getStructLayout(STy);
diff --git a/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/alloca-load-store-scalable-struct.ll
@@ -0,0 +1,52 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+
+target triple = "riscv64-unknown-unknown-elf"
+
+%struct.test = type { <vscale x 1 x double>, <vscale x 1 x double> }
+
+define <vscale x 1 x double> @test(%struct.test* %addr, i64 %vl) {
+; CHECK-LABEL: test:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    csrrs a2, vlenb, zero
+; CHECK-NEXT:    slli a2, a2, 1
+; CHECK-NEXT:    sub sp, sp, a2
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
+; CHECK-NEXT:    addi a2, a0, 8
+; CHECK-NEXT:    vl1re64.v v8, (a2)
+; CHECK-NEXT:    vl1re64.v v9, (a0)
+; CHECK-NEXT:    addi a0, sp, 24
+; CHECK-NEXT:    vs1r.v v8, (a0)
+; CHECK-NEXT:    addi a2, sp, 16
+; CHECK-NEXT:    vs1r.v v9, (a2)
+; CHECK-NEXT:    vl1re64.v v8, (a0)
+; CHECK-NEXT:    vl1re64.v v9, (a2)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vfadd.vv v8, v9, v8
+; CHECK-NEXT:    csrrs a0, vlenb, zero
+; CHECK-NEXT:    slli a0, a0, 1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %ret = alloca %struct.test, align 8
+  %val = load %struct.test, %struct.test* %addr
+  store %struct.test %val, %struct.test* %ret, align 8
+  %0 = load %struct.test, %struct.test* %ret, align 8
+  %1 = extractvalue %struct.test %0, 0
+  %2 = extractvalue %struct.test %0, 1
+  %3 = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(
+    <vscale x 1 x double> poison,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2, i64 %vl)
+  ret <vscale x 1 x double> %3
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64.i64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i64)
diff --git a/llvm/test/Other/load-scalable-vector-struct.ll b/llvm/test/Other/load-scalable-vector-struct.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Other/load-scalable-vector-struct.ll
@@ -0,0 +1,16 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt -passes=verify -S < %s 2>&1 | FileCheck %s
+
+%struct.test = type { <vscale x 1 x i32>, <vscale x 1 x i32> }
+
+define <vscale x 1 x i32> @load(%struct.test* %x) {
+; CHECK-LABEL: define <vscale x 1 x i32> @load
+; CHECK-SAME: (ptr [[X:%.*]]) {
+; CHECK-NEXT:    [[A:%.*]] = load [[STRUCT_TEST:%.*]], ptr [[X]], align 4
+; CHECK-NEXT:    [[B:%.*]] = extractvalue [[STRUCT_TEST]] [[A]], 1
+; CHECK-NEXT:    ret <vscale x 1 x i32> [[B]]
+;
+  %a = load %struct.test, %struct.test* %x
+  %b = extractvalue %struct.test %a, 1
+  ret <vscale x 1 x i32> %b
+}
diff --git a/llvm/test/Other/store-scalable-vector-struct.ll b/llvm/test/Other/store-scalable-vector-struct.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Other/store-scalable-vector-struct.ll
@@ -0,0 +1,18 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt -passes=verify -S < %s 2>&1 | FileCheck %s
+
+%struct.test = type { <vscale x 1 x i32>, <vscale x 1 x i32> }
+
+define void @store(%struct.test* %x, <vscale x 1 x i32> %y, <vscale x 1 x i32> %z) {
+; CHECK-LABEL: define void @store
+; CHECK-SAME: (ptr [[X:%.*]], <vscale x 1 x i32> [[Y:%.*]], <vscale x 1 x i32> [[Z:%.*]]) {
+; CHECK-NEXT:    [[A:%.*]] = insertvalue [[STRUCT_TEST:%.*]] undef, <vscale x 1 x i32> [[Y]], 0
+; CHECK-NEXT:    [[B:%.*]] = insertvalue [[STRUCT_TEST]] [[A]], <vscale x 1 x i32> [[Z]], 1
+; CHECK-NEXT:    store [[STRUCT_TEST]] [[B]], ptr [[X]], align 4
+; CHECK-NEXT:    ret void
+;
+  %a = insertvalue %struct.test undef, <vscale x 1 x i32> %y, 0
+  %b = insertvalue %struct.test %a, <vscale x 1 x i32> %z, 1
+  store %struct.test %b, %struct.test* %x
+  ret void
+}
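
The LangRef, DerivedTypes.h, and Type.cpp changes above all express one rule: a struct is still "sized" when every member is the same scalable vector type. Below is a minimal standalone C++ sketch of that rule (not part of the patch; it assumes the API introduced by this change, and the expectations in the comments are what the new code is intended to return).

// Sketch only: assumes the API introduced in this patch.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  // <vscale x 2 x i32> and <vscale x 2 x i64>
  Type *NxV2I32 = ScalableVectorType::get(Type::getInt32Ty(Ctx), 2);
  Type *NxV2I64 = ScalableVectorType::get(Type::getInt64Ty(Ctx), 2);

  // Homogeneous: every member is the same scalable vector type.
  StructType *Homogeneous = StructType::get(Ctx, {NxV2I32, NxV2I32});
  // Mixed: two different scalable vector types.
  StructType *Mixed = StructType::get(Ctx, {NxV2I32, NxV2I64});

  outs() << "homogeneous: "
         << (Homogeneous->containsHomogeneousScalableVectorTypes() ? "yes" : "no")
         << ", sized: " << (Homogeneous->isSized() ? "yes" : "no")
         << ", scalable: " << (Homogeneous->isScalableTy() ? "yes" : "no") << "\n";
  outs() << "mixed:       "
         << (Mixed->containsHomogeneousScalableVectorTypes() ? "yes" : "no")
         << ", sized: " << (Mixed->isSized() ? "yes" : "no")
         << ", scalable: " << (Mixed->isScalableTy() ? "yes" : "no") << "\n";
  // Expected with this patch: the homogeneous struct is sized (and therefore
  // usable in load/store/alloca); the mixed struct is not.
  return 0;
}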
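With StructLayout carrying TypeSize instead of uint64_t, layout queries on a homogeneous scalable vector struct yield scalable quantities. The following sketch is illustrative only; it assumes the default (empty) data layout string, so the exact byte values depend on the target's real layout, and it deliberately avoids getElementContainingOffset(), which keeps a fixed uint64_t offset and asserts on scalable-sized structs.

// Sketch only: assumes the TypeSize-returning StructLayout API from this patch.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static void printSize(StringRef What, TypeSize TS) {
  // Scalable quantities print as a multiple of vscale.
  outs() << What << ": " << (TS.isScalable() ? "vscale x " : "")
         << TS.getKnownMinValue() << " bytes\n";
}

int main() {
  LLVMContext Ctx;
  DataLayout DL("");  // default layout; real targets supply their own string
  Type *NxV2I32 = ScalableVectorType::get(Type::getInt32Ty(Ctx), 2);
  StructType *STy = StructType::get(Ctx, {NxV2I32, NxV2I32});

  const StructLayout *SL = DL.getStructLayout(STy);
  printSize("struct size", SL->getSizeInBytes());         // scalable total size
  printSize("element 1 offset", SL->getElementOffset(1)); // scalable offset
  return 0;
}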
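The new llvm/test/Other tests exercise the verifier through textual IR; the same check can be reproduced programmatically. This is a hedged sketch, not part of the patch: it builds a homogeneous scalable vector struct, emits alloca/load/store through IRBuilder, and runs the verifier. The module, struct, and function names ("scalable-struct-demo", "roundtrip") are illustrative.

// Sketch only: mirrors what the new llvm/test/Other tests check, via the C++ API.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("scalable-struct-demo", Ctx);
  Type *NxV1F64 = ScalableVectorType::get(Type::getDoubleTy(Ctx), 1);
  StructType *STy = StructType::create({NxV1F64, NxV1F64}, "struct.test");

  FunctionType *FTy = FunctionType::get(
      Type::getVoidTy(Ctx), {PointerType::getUnqual(Ctx)}, /*isVarArg=*/false);
  Function *F =
      Function::Create(FTy, Function::ExternalLinkage, "roundtrip", M);
  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));

  Value *Slot = B.CreateAlloca(STy);            // alloca %struct.test
  Value *Val = B.CreateLoad(STy, F->getArg(0)); // load %struct.test from arg
  B.CreateStore(Val, Slot);                     // store %struct.test
  B.CreateRetVoid();

  // With this patch the verifier accepts the module; before it, a struct
  // containing scalable vectors was unsized and these operations were invalid.
  outs() << (verifyModule(M, &errs()) ? "verifier error\n" : "ok\n");
  return 0;
}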