diff --git a/llvm/include/llvm/Support/LowLevelTypeImpl.h b/llvm/include/llvm/Support/LowLevelTypeImpl.h
--- a/llvm/include/llvm/Support/LowLevelTypeImpl.h
+++ b/llvm/include/llvm/Support/LowLevelTypeImpl.h
@@ -43,7 +43,18 @@
   static LLT scalar(unsigned SizeInBits) {
     assert(SizeInBits > 0 && "invalid scalar size");
     return LLT{/*isPointer=*/false, /*isVector=*/false,
-               ElementCount::getFixed(0), SizeInBits,
-               /*AddressSpace=*/0};
+               /*isOpaque=*/false, ElementCount::getFixed(0),
+               SizeInBits,
+               /*AddressSpace=*/0};
+  }
+
+  /// Get a low-level opaque type.
+  static LLT opaque() {
+    return LLT{/*isPointer=*/false,
+               /*isVector=*/false,
+               /*isOpaque=*/true,
+               ElementCount::getFixed(0),
+               0, /*AddressSpace=*/0};
   }
@@ -51,22 +62,27 @@
   static LLT pointer(unsigned AddressSpace, unsigned SizeInBits) {
     assert(SizeInBits > 0 && "invalid pointer size");
     return LLT{/*isPointer=*/true, /*isVector=*/false,
-               ElementCount::getFixed(0), SizeInBits, AddressSpace};
+               /*isOpaque=*/false, ElementCount::getFixed(0),
+               SizeInBits, AddressSpace};
   }
 
   /// Get a low-level vector of some number of elements and element width.
   static LLT vector(ElementCount EC, unsigned ScalarSizeInBits) {
     assert(!EC.isScalar() && "invalid number of vector elements");
     assert(ScalarSizeInBits > 0 && "invalid vector element size");
-    return LLT{/*isPointer=*/false, /*isVector=*/true, EC, ScalarSizeInBits,
-               /*AddressSpace=*/0};
+    return LLT{/*isPointer=*/false, /*isVector=*/true,
+               /*isOpaque=*/false, EC,
+               ScalarSizeInBits, /*AddressSpace=*/0};
   }
 
   /// Get a low-level vector of some number of elements and element type.
   static LLT vector(ElementCount EC, LLT ScalarTy) {
     assert(!EC.isScalar() && "invalid number of vector elements");
     assert(!ScalarTy.isVector() && "invalid vector element type");
-    return LLT{ScalarTy.isPointer(), /*isVector=*/true, EC,
+    return LLT{ScalarTy.isPointer(),
+               /*isVector=*/true,
+               /*isOpaque=*/false,
+               EC,
                ScalarTy.getSizeInBits().getFixedSize(),
                ScalarTy.isPointer() ? ScalarTy.getAddressSpace() : 0};
   }
@@ -106,22 +122,27 @@
     return scalarOrVector(EC, LLT::scalar(static_cast<unsigned>(ScalarSize)));
   }
 
-  explicit LLT(bool isPointer, bool isVector, ElementCount EC,
+  explicit LLT(bool isPointer, bool isVector, bool isOpaque, ElementCount EC,
                uint64_t SizeInBits, unsigned AddressSpace) {
-    init(isPointer, isVector, EC, SizeInBits, AddressSpace);
+    init(isPointer, isVector, isOpaque, EC, SizeInBits, AddressSpace);
   }
 
-  explicit LLT() : IsPointer(false), IsVector(false), RawData(0) {}
+  explicit LLT()
+      : IsOpaque(false), IsPointer(false), IsVector(false), RawData(0) {}
 
   explicit LLT(MVT VT);
 
   bool isValid() const { return RawData != 0; }
 
-  bool isScalar() const { return isValid() && !IsPointer && !IsVector; }
+  bool isScalar() const {
+    return isValid() && !IsPointer && !IsVector && !IsOpaque;
+  }
 
   bool isPointer() const { return isValid() && IsPointer && !IsVector; }
 
   bool isVector() const { return isValid() && IsVector; }
 
+  bool isOpaque() const { return isValid() && IsOpaque; }
+
   /// Returns the number of elements in a vector LLT. Must only be called on
   /// vector types.
   uint16_t getNumElements() const {
@@ -264,13 +285,14 @@
   /// LLT is packed into 64 bits as follows:
   /// isPointer : 1
   /// isVector  : 1
-  /// with 62 bits remaining for Kind-specific data, packed in bitfields
+  /// isOpaque  : 1
+  /// with 61 bits remaining for Kind-specific data, packed in bitfields
   /// as described below. As there isn't a simple portable way to pack bits
   /// into bitfields, here the different fields in the packed structure is
   /// described in static const *Field variables. Each of these variables
   /// is a 2-element array, with the first element describing the bitfield size
   /// and the second element describing the bitfield offset.
   typedef int BitFieldInfo[2];
   ///
   /// This is how the bitfields are packed per Kind:
   /// * Invalid:
@@ -286,7 +308,7 @@
   static const constexpr BitFieldInfo PointerAddressSpaceFieldInfo{
       24, PointerSizeFieldInfo[0] + PointerSizeFieldInfo[1]};
   static_assert((PointerAddressSpaceFieldInfo[0] +
-                 PointerAddressSpaceFieldInfo[1]) <= 62,
+                 PointerAddressSpaceFieldInfo[1]) <= 61,
                 "Insufficient bits to encode all data");
   /// * Vector-of-non-pointer (isPointer == 0 && isVector == 1):
   ///   NumElements: 16;
@@ -297,7 +319,7 @@
       32, VectorElementsFieldInfo[0] + VectorElementsFieldInfo[1]};
   static const constexpr BitFieldInfo VectorScalableFieldInfo{
       1, VectorSizeFieldInfo[0] + VectorSizeFieldInfo[1]};
-  static_assert((VectorSizeFieldInfo[0] + VectorSizeFieldInfo[1]) <= 62,
+  static_assert((VectorSizeFieldInfo[0] + VectorSizeFieldInfo[1]) <= 61,
                 "Insufficient bits to encode all data");
   /// * Vector-of-pointer (isPointer == 1 && isVector == 1):
   ///   NumElements: 16;
@@ -314,12 +336,13 @@
       1, PointerVectorAddressSpaceFieldInfo[0] +
              PointerVectorAddressSpaceFieldInfo[1]};
   static_assert((PointerVectorAddressSpaceFieldInfo[0] +
-                 PointerVectorAddressSpaceFieldInfo[1]) <= 62,
+                 PointerVectorAddressSpaceFieldInfo[1]) <= 61,
                 "Insufficient bits to encode all data");
 
+  uint64_t IsOpaque : 1;
   uint64_t IsPointer : 1;
   uint64_t IsVector : 1;
-  uint64_t RawData : 62;
+  uint64_t RawData : 61;
 
   static uint64_t getMask(const BitFieldInfo FieldInfo) {
     const int FieldSizeInBits = FieldInfo[0];
@@ -336,16 +359,19 @@
     return getMask(FieldInfo) & (RawData >> FieldInfo[1]);
   }
 
-  void init(bool IsPointer, bool IsVector, ElementCount EC, uint64_t SizeInBits,
-            unsigned AddressSpace) {
+  void init(bool IsPointer, bool IsVector, bool IsOpaque, ElementCount EC,
+            uint64_t SizeInBits, unsigned AddressSpace) {
     assert(SizeInBits <= std::numeric_limits<unsigned>::max() &&
            "Not enough bits in LLT to represent size");
     this->IsPointer = IsPointer;
     this->IsVector = IsVector;
+    this->IsOpaque = IsOpaque;
+
     if (!IsVector) {
-      if (!IsPointer)
+      if (!IsPointer) {
+        // Scalar or opaque types.
         RawData = maskAndShift(SizeInBits, ScalarSizeFieldInfo);
-      else
+      } else
         RawData = maskAndShift(SizeInBits, PointerSizeFieldInfo) |
                   maskAndShift(AddressSpace, PointerAddressSpaceFieldInfo);
     } else {
diff --git a/llvm/lib/CodeGen/MachineOperand.cpp b/llvm/lib/CodeGen/MachineOperand.cpp
--- a/llvm/lib/CodeGen/MachineOperand.cpp
+++ b/llvm/lib/CodeGen/MachineOperand.cpp
@@ -1048,9 +1048,12 @@
                                      const MDNode *Ranges, SyncScope::ID SSID,
                                      AtomicOrdering Ordering,
                                      AtomicOrdering FailureOrdering)
-    : MachineMemOperand(ptrinfo, f,
-                        s == ~UINT64_C(0) ? LLT() : LLT::scalar(8 * s), a,
-                        AAInfo, Ranges, SSID, Ordering, FailureOrdering) {}
+    : MachineMemOperand(
+          ptrinfo, f,
+          s == ~UINT64_C(0)
+              ? LLT()
+              : (s == UINT64_C(0) ? LLT::opaque() : LLT::scalar(8 * s)),
+          a, AAInfo, Ranges, SSID, Ordering, FailureOrdering) {}
 
 /// Profile - Gather unique data for the object.
 ///
diff --git a/llvm/lib/Support/LowLevelType.cpp b/llvm/lib/Support/LowLevelType.cpp
--- a/llvm/lib/Support/LowLevelType.cpp
+++ b/llvm/lib/Support/LowLevelType.cpp
@@ -17,16 +17,20 @@
 LLT::LLT(MVT VT) {
   if (VT.isVector()) {
-    init(/*IsPointer=*/false, VT.getVectorNumElements() > 1,
+    init(/*IsPointer=*/false, VT.getVectorNumElements() > 1, /*IsOpaque=*/false,
         VT.getVectorElementCount(), VT.getVectorElementType().getSizeInBits(),
         /*AddressSpace=*/0);
   } else if (VT.isValid()) {
     // Aggregates are no different from real scalars as far as GlobalISel is
     // concerned.
-    assert(VT.getSizeInBits().isNonZero() && "invalid zero-sized type");
-    init(/*IsPointer=*/false, /*IsVector=*/false, ElementCount::getFixed(0),
-         VT.getSizeInBits(), /*AddressSpace=*/0);
+    if (VT.getSizeInBits().isNonZero())
+      init(/*IsPointer=*/false, /*IsVector=*/false, /*IsOpaque=*/false,
+           ElementCount::getFixed(0), VT.getSizeInBits(), /*AddressSpace=*/0);
+    else
+      init(/*IsPointer=*/false, /*IsVector=*/false, /*IsOpaque=*/true,
+           ElementCount::getFixed(0), VT.getSizeInBits(), /*AddressSpace=*/0);
   } else {
+    IsOpaque = false;
     IsPointer = false;
     IsVector = false;
     RawData = 0;
@@ -39,6 +43,8 @@
     OS << getElementCount() << " x " << getElementType() << ">";
   } else if (isPointer())
     OS << "p" << getAddressSpace();
+  else if (isOpaque())
+    OS << "<opaque>";
   else if (isValid()) {
     assert(isScalar() && "unexpected type");
     OS << "s" << getScalarSizeInBits();
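
Note, not part of the patch: a minimal sketch restating the size-to-memory-type mapping that the MachineOperand.cpp hunk above introduces, so the three cases can be read in isolation. The helper name memoryTypeForSize is hypothetical and only for illustration; it assumes the patch applies as written.

  // Illustrative sketch, not proposed for inclusion anywhere in tree.
  #include <cstdint>

  #include "llvm/Support/LowLevelTypeImpl.h"

  using namespace llvm;

  // Mirrors the MachineMemOperand constructor change above:
  //   ~UINT64_C(0)  -> default LLT()       (size completely unknown)
  //   UINT64_C(0)   -> LLT::opaque()       (operation with no meaningful size)
  //   anything else -> LLT::scalar(8 * s)  (plain "bag of bits" of s bytes)
  static LLT memoryTypeForSize(uint64_t SizeInBytes) {
    if (SizeInBytes == ~UINT64_C(0))
      return LLT();
    if (SizeInBytes == UINT64_C(0))
      return LLT::opaque();
    return LLT::scalar(8 * SizeInBytes);
  }

With this mapping, a zero-byte access is tagged with the new opaque type rather than tripping the old "invalid zero-sized type" assertion, while a normal 4-byte access keeps its s32 memory type.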