diff --git a/mlir/lib/IR/BuiltinAttributes.cpp b/mlir/lib/IR/BuiltinAttributes.cpp
--- a/mlir/lib/IR/BuiltinAttributes.cpp
+++ b/mlir/lib/IR/BuiltinAttributes.cpp
@@ -696,8 +696,8 @@
   size_t storageBitWidth = getDenseElementStorageWidth(bitWidth);
 
   // Compress the attribute values into a character buffer.
-  SmallVector<char, 8> data(llvm::divideCeil(storageBitWidth, CHAR_BIT) *
-                            values.size());
+  SmallVector<char, 8> data(
+      llvm::divideCeil(storageBitWidth * values.size(), CHAR_BIT));
   APInt intVal;
   for (unsigned i = 0, e = values.size(); i < e; ++i) {
     assert(eltType == values[i].getType() &&
@@ -1027,7 +1027,7 @@
 template <typename APRangeT>
 static void writeAPIntsToBuffer(size_t storageWidth, std::vector<char> &data,
                                 APRangeT &&values) {
-  data.resize(llvm::divideCeil(storageWidth, CHAR_BIT) * llvm::size(values));
+  data.resize(llvm::divideCeil(storageWidth * llvm::size(values), CHAR_BIT));
   size_t offset = 0;
   for (auto it = values.begin(), e = values.end(); it != e;
        ++it, offset += storageWidth) {
@@ -1184,7 +1184,7 @@
   assert(newArrayType && "Unhandled tensor type");
 
   size_t numRawElements = attr.isSplat() ? 1 : newArrayType.getNumElements();
-  data.resize(llvm::divideCeil(storageBitWidth, CHAR_BIT) * numRawElements);
+  data.resize(llvm::divideCeil(storageBitWidth * numRawElements, CHAR_BIT));
 
   // Functor used to process a single element value of the attribute.
   auto processElt = [&](decltype(*attr.begin()) value, size_t index) {
diff --git a/mlir/test/IR/attribute-roundtrip.mlir b/mlir/test/IR/attribute-roundtrip.mlir
new file mode 100644
--- /dev/null
+++ b/mlir/test/IR/attribute-roundtrip.mlir
@@ -0,0 +1,10 @@
+// RUN: mlir-opt -canonicalize %s | mlir-opt | FileCheck %s
+
+// CHECK-LABEL: @large_i1_tensor_roundtrip
+func @large_i1_tensor_roundtrip() -> tensor<160xi1> {
+  %cst_0 = arith.constant dense<"0xFFF00000FF000000FF000000FF000000FF000000"> : tensor<160xi1>
+  %cst_1 = arith.constant dense<"0xFF000000FF000000FF000000FF000000FF0000F0"> : tensor<160xi1>
+  // CHECK: dense<"0xFF000000FF000000FF000000FF000000FF000000">
+  %0 = arith.andi %cst_0, %cst_1 : tensor<160xi1>
+  return %0 : tensor<160xi1>
+}
diff --git a/mlir/test/Target/SPIRV/constant.mlir b/mlir/test/Target/SPIRV/constant.mlir
--- a/mlir/test/Target/SPIRV/constant.mlir
+++ b/mlir/test/Target/SPIRV/constant.mlir
@@ -159,7 +159,7 @@
 spv.func @bool_vector_const() -> () "None" {
   // CHECK: spv.Constant dense<true> : vector<2xi1>
   %0 = spv.Constant dense<true> : vector<2xi1>
-  // CHECK: spv.Constant dense<[true, true, true]> : vector<3xi1>
+  // CHECK: spv.Constant dense<true> : vector<3xi1>
   %1 = spv.Constant dense<true> : vector<3xi1>
   // CHECK: spv.Constant dense<[false, true]> : vector<2xi1>
   %2 = spv.Constant dense<[false, true]> : vector<2xi1>
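
Note (illustration, not part of the patch): the old formula rounds the per-element bit width up to whole bytes before multiplying, so every sub-byte element such as i1 is charged a full byte; the new formula multiplies first and rounds the bit total up once, which matches the bit-packed layout exercised by the tensor<160xi1> round-trip test above. Below is a minimal standalone C++ sketch of the arithmetic, where divideCeil is a local stand-in for llvm::divideCeil and the constants mirror the new test:

  #include <climits>
  #include <cstdio>

  // Stand-in for llvm::divideCeil: ceiling of numerator / denominator.
  static unsigned long divideCeil(unsigned long numerator,
                                  unsigned long denominator) {
    return (numerator + denominator - 1) / denominator;
  }

  int main() {
    const unsigned long storageBitWidth = 1; // i1 element storage width
    const unsigned long numElements = 160;   // as in tensor<160xi1>

    // Old: round each element up to a byte, then multiply -> 160 bytes.
    unsigned long oldBytes =
        divideCeil(storageBitWidth, CHAR_BIT) * numElements;

    // New: multiply first, then round the bit total up once -> 20 bytes,
    // the size of the 40-hex-digit constants in the new test.
    unsigned long newBytes =
        divideCeil(storageBitWidth * numElements, CHAR_BIT);

    printf("old: %lu bytes, new: %lu bytes\n", oldBytes, newBytes);
    return 0;
  }

For element types whose storage width is already a whole number of bytes, the two formulas agree, so only sub-byte cases such as i1 change behavior.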