diff --git a/llvm/include/llvm/Support/LowLevelTypeImpl.h b/llvm/include/llvm/Support/LowLevelTypeImpl.h
--- a/llvm/include/llvm/Support/LowLevelTypeImpl.h
+++ b/llvm/include/llvm/Support/LowLevelTypeImpl.h
@@ -41,8 +41,7 @@
 public:
   /// Get a low-level scalar or aggregate "bag of bits".
   static LLT scalar(unsigned SizeInBits) {
-    assert(SizeInBits > 0 && "invalid scalar size");
-    return LLT{/*isPointer=*/false, /*isVector=*/false,
+    return LLT{/*isPointer=*/false, /*isVector=*/false, /*isScalar=*/true,
                ElementCount::getFixed(0), SizeInBits,
                /*AddressSpace=*/0};
   }
@@ -50,7 +49,7 @@
   /// Get a low-level pointer in the given address space.
   static LLT pointer(unsigned AddressSpace, unsigned SizeInBits) {
     assert(SizeInBits > 0 && "invalid pointer size");
-    return LLT{/*isPointer=*/true, /*isVector=*/false,
+    return LLT{/*isPointer=*/true, /*isVector=*/false, /*isScalar=*/false,
                ElementCount::getFixed(0), SizeInBits, AddressSpace};
   }
 
@@ -58,7 +57,7 @@
   static LLT vector(ElementCount EC, unsigned ScalarSizeInBits) {
     assert(!EC.isScalar() && "invalid number of vector elements");
     assert(ScalarSizeInBits > 0 && "invalid vector element size");
-    return LLT{/*isPointer=*/false, /*isVector=*/true, EC, ScalarSizeInBits,
+    return LLT{/*isPointer=*/false, /*isVector=*/true, /*isScalar=*/false, EC, ScalarSizeInBits,
                /*AddressSpace=*/0};
   }
 
@@ -66,7 +65,7 @@
   static LLT vector(ElementCount EC, LLT ScalarTy) {
     assert(!EC.isScalar() && "invalid number of vector elements");
     assert(!ScalarTy.isVector() && "invalid vector element type");
-    return LLT{ScalarTy.isPointer(), /*isVector=*/true, EC,
+    return LLT{ScalarTy.isPointer(), /*isVector=*/true, /*isScalar=*/false, EC,
                ScalarTy.getSizeInBits().getFixedSize(),
                ScalarTy.isPointer() ? ScalarTy.getAddressSpace() : 0};
   }
@@ -106,17 +105,19 @@
     return scalarOrVector(EC, LLT::scalar(static_cast<unsigned>(ScalarSize)));
   }
 
-  explicit LLT(bool isPointer, bool isVector, ElementCount EC,
+  explicit LLT(bool isPointer, bool isVector, bool isScalar, ElementCount EC,
                uint64_t SizeInBits, unsigned AddressSpace) {
-    init(isPointer, isVector, EC, SizeInBits, AddressSpace);
+    init(isPointer, isVector, isScalar, EC, SizeInBits, AddressSpace);
   }
-  explicit LLT() : IsPointer(false), IsVector(false), RawData(0) {}
+  explicit LLT() : IsScalar(false), IsPointer(false), IsVector(false), RawData(0) {}
 
   explicit LLT(MVT VT);
 
-  bool isValid() const { return RawData != 0; }
+  bool isValid() const { return IsScalar || RawData != 0; }
 
-  bool isScalar() const { return isValid() && !IsPointer && !IsVector; }
+  bool isScalar() const {
+    return isValid() && IsScalar && !IsPointer && !IsVector;
+  }
 
   bool isPointer() const { return isValid() && IsPointer && !IsVector; }
 
@@ -196,6 +197,8 @@
   /// not attempt to handle cases that aren't evenly divisible.
   LLT divide(int Factor) const {
     assert(Factor != 1);
+    assert((!isScalar() || getScalarSizeInBits() != 0) &&
+           "cannot divide scalar of size zero");
     if (isVector()) {
       assert(getElementCount().isKnownMultipleOf(Factor));
       return scalarOrVector(getElementCount().divideCoefficientBy(Factor),
@@ -209,18 +212,17 @@
   bool isByteSized() const { return getSizeInBits().isKnownMultipleOf(8); }
 
   unsigned getScalarSizeInBits() const {
-    assert(RawData != 0 && "Invalid Type");
-    if (!IsVector) {
-      if (!IsPointer)
-        return getFieldValue(ScalarSizeFieldInfo);
-      else
-        return getFieldValue(PointerSizeFieldInfo);
-    } else {
+    if (IsScalar)
+      return getFieldValue(ScalarSizeFieldInfo);
+    else if (IsVector) {
       if (!IsPointer)
         return getFieldValue(VectorSizeFieldInfo);
       else
         return getFieldValue(PointerVectorSizeFieldInfo);
-    }
+    } else if (IsPointer)
+      return getFieldValue(PointerSizeFieldInfo);
+    else
+      llvm_unreachable("unexpected LLT");
   }
 
   unsigned getAddressSpace() const {
@@ -251,7 +253,7 @@
 #endif
 
   bool operator==(const LLT &RHS) const {
-    return IsPointer == RHS.IsPointer && IsVector == RHS.IsVector &&
+    return IsPointer == RHS.IsPointer && IsVector == RHS.IsVector && IsScalar == RHS.IsScalar &&
            RHS.RawData == RawData;
   }
 
@@ -262,9 +264,10 @@
 
 private:
   /// LLT is packed into 64 bits as follows:
+  /// isScalar : 1
   /// isPointer : 1
   /// isVector  : 1
-  /// with 62 bits remaining for Kind-specific data, packed in bitfields
+  /// with 61 bits remaining for Kind-specific data, packed in bitfields
   /// as described below. As there isn't a simple portable way to pack bits
   /// into bitfields, here the different fields in the packed structure is
   /// described in static const *Field variables. Each of these variables
@@ -286,7 +289,7 @@
   static const constexpr BitFieldInfo PointerAddressSpaceFieldInfo{
       24, PointerSizeFieldInfo[0] + PointerSizeFieldInfo[1]};
   static_assert((PointerAddressSpaceFieldInfo[0] +
-                 PointerAddressSpaceFieldInfo[1]) <= 62,
+                 PointerAddressSpaceFieldInfo[1]) <= 61,
                 "Insufficient bits to encode all data");
   /// * Vector-of-non-pointer (isPointer == 0 && isVector == 1):
   ///   NumElements: 16;
@@ -297,7 +300,7 @@
       32, VectorElementsFieldInfo[0] + VectorElementsFieldInfo[1]};
   static const constexpr BitFieldInfo VectorScalableFieldInfo{
       1, VectorSizeFieldInfo[0] + VectorSizeFieldInfo[1]};
-  static_assert((VectorSizeFieldInfo[0] + VectorSizeFieldInfo[1]) <= 62,
+  static_assert((VectorSizeFieldInfo[0] + VectorSizeFieldInfo[1]) <= 61,
                 "Insufficient bits to encode all data");
   /// * Vector-of-pointer (isPointer == 1 && isVector == 1):
   ///   NumElements: 16;
@@ -314,12 +317,13 @@
       1, PointerVectorAddressSpaceFieldInfo[0] +
              PointerVectorAddressSpaceFieldInfo[1]};
   static_assert((PointerVectorAddressSpaceFieldInfo[0] +
-                 PointerVectorAddressSpaceFieldInfo[1]) <= 62,
+                 PointerVectorAddressSpaceFieldInfo[1]) <= 61,
                 "Insufficient bits to encode all data");
 
+  uint64_t IsScalar : 1;
   uint64_t IsPointer : 1;
   uint64_t IsVector : 1;
-  uint64_t RawData : 62;
+  uint64_t RawData : 61;
 
   static uint64_t getMask(const BitFieldInfo FieldInfo) {
     const int FieldSizeInBits = FieldInfo[0];
@@ -336,19 +340,16 @@
     return getMask(FieldInfo) & (RawData >> FieldInfo[1]);
   }
 
-  void init(bool IsPointer, bool IsVector, ElementCount EC, uint64_t SizeInBits,
+  void init(bool IsPointer, bool IsVector, bool IsScalar, ElementCount EC, uint64_t SizeInBits,
             unsigned AddressSpace) {
     assert(SizeInBits <= std::numeric_limits<unsigned>::max() &&
            "Not enough bits in LLT to represent size");
     this->IsPointer = IsPointer;
     this->IsVector = IsVector;
-    if (!IsVector) {
-      if (!IsPointer)
-        RawData = maskAndShift(SizeInBits, ScalarSizeFieldInfo);
-      else
-        RawData = maskAndShift(SizeInBits, PointerSizeFieldInfo) |
-                  maskAndShift(AddressSpace, PointerAddressSpaceFieldInfo);
-    } else {
+    this->IsScalar = IsScalar;
+    if (IsScalar)
+      RawData = maskAndShift(SizeInBits, ScalarSizeFieldInfo);
+    else if (IsVector) {
       assert(EC.isVector() && "invalid number of vector elements");
       if (!IsPointer)
         RawData =
@@ -364,12 +365,17 @@
             maskAndShift(EC.isScalable() ? 1 : 0,
                          PointerVectorScalableFieldInfo);
     }
+    else if (IsPointer)
+      RawData = maskAndShift(SizeInBits, PointerSizeFieldInfo) |
+                maskAndShift(AddressSpace, PointerAddressSpaceFieldInfo);
+    else
+      llvm_unreachable("unexpected LLT configuration");
   }
 
 public:
   uint64_t getUniqueRAWLLTData() const {
-    return ((uint64_t)RawData) << 2 | ((uint64_t)IsPointer) << 1 |
-           ((uint64_t)IsVector);
+    return ((uint64_t)RawData) << 3 | ((uint64_t)IsScalar) << 2 |
+           ((uint64_t)IsPointer) << 1 | ((uint64_t)IsVector);
   }
 };
 
diff --git a/llvm/lib/Support/LowLevelType.cpp b/llvm/lib/Support/LowLevelType.cpp
--- a/llvm/lib/Support/LowLevelType.cpp
+++ b/llvm/lib/Support/LowLevelType.cpp
@@ -17,16 +17,17 @@
 
 LLT::LLT(MVT VT) {
   if (VT.isVector()) {
-    init(/*IsPointer=*/false, VT.getVectorNumElements() > 1,
+    bool AsVector = VT.getVectorNumElements() > 1;
+    init(/*IsPointer=*/false, AsVector, /*IsScalar=*/!AsVector,
          VT.getVectorElementCount(), VT.getVectorElementType().getSizeInBits(),
          /*AddressSpace=*/0);
   } else if (VT.isValid()) {
     // Aggregates are no different from real scalars as far as GlobalISel is
     // concerned.
-    assert(VT.getSizeInBits().isNonZero() && "invalid zero-sized type");
-    init(/*IsPointer=*/false, /*IsVector=*/false, ElementCount::getFixed(0),
-         VT.getSizeInBits(), /*AddressSpace=*/0);
+    init(/*IsPointer=*/false, /*IsVector=*/false, /*IsScalar=*/true,
+         ElementCount::getFixed(0), VT.getSizeInBits(), /*AddressSpace=*/0);
   } else {
+    IsScalar = false;
     IsPointer = false;
     IsVector = false;
     RawData = 0;
diff --git a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
--- a/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
+++ b/llvm/unittests/CodeGen/LowLevelTypeTest.cpp
@@ -22,7 +22,7 @@
   LLVMContext C;
   DataLayout DL("");
 
-  for (unsigned S : {1U, 17U, 32U, 64U, 0xfffffU}) {
+  for (unsigned S : {0U, 1U, 17U, 32U, 64U, 0xfffffU}) {
     const LLT Ty = LLT::scalar(S);
 
     // Test kind.
@@ -41,8 +41,10 @@
     EXPECT_FALSE(Ty != Ty);
 
     // Test Type->LLT conversion.
-    Type *IRTy = IntegerType::get(C, S);
-    EXPECT_EQ(Ty, getLLTForType(*IRTy, DL));
+    if (S != 0) { // No IR equivalent: IntegerType requires at least one bit.
+      Type *IRTy = IntegerType::get(C, S);
+      EXPECT_EQ(Ty, getLLTForType(*IRTy, DL));
+    }
   }
 }
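
Illustrative only, not part of the patch: a minimal sketch of the behavior the
explicit IsScalar bit enables, assuming the patched LowLevelTypeImpl.h is on the
include path (and, if needed, linking against LLVMSupport). With the extra kind
bit, LLT::scalar(0) becomes a valid zero-sized scalar even though its RawData is
zero, while a default-constructed LLT stays invalid.

// Sketch under the assumptions above; requires the patched header.
#include "llvm/Support/LowLevelTypeImpl.h"
#include <cassert>

using namespace llvm;

int main() {
  // Default-constructed LLT: no kind bit set and RawData == 0, so invalid.
  LLT Invalid;
  assert(!Invalid.isValid() && !Invalid.isScalar());

  // Zero-sized scalar: RawData is still 0, but the explicit IsScalar bit now
  // makes it both valid and a scalar; operator== also compares the new bit,
  // so scalar(0) and the invalid LLT no longer collapse into one value.
  LLT S0 = LLT::scalar(0);
  assert(S0.isValid() && S0.isScalar());
  assert(S0.getScalarSizeInBits() == 0);
  assert(S0 != Invalid);

  // Existing kinds are unchanged; only the raw encoding gains one bit.
  LLT S32 = LLT::scalar(32);
  LLT V4S32 = LLT::vector(ElementCount::getFixed(4), 32);
  assert(S32.isScalar() && S32.getScalarSizeInBits() == 32);
  assert(V4S32.isVector() && V4S32.getScalarSizeInBits() == 32);
  return 0;
}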