diff --git a/mlir/lib/IR/BuiltinTypes.cpp b/mlir/lib/IR/BuiltinTypes.cpp
--- a/mlir/lib/IR/BuiltinTypes.cpp
+++ b/mlir/lib/IR/BuiltinTypes.cpp
@@ -946,7 +946,7 @@
                                                 ArrayRef<AffineExpr> exprs,
                                                 MLIRContext *context) {
   // Size 0 corner case is useful for canonicalizations.
-  if (sizes.empty() || llvm::is_contained(sizes, 0))
+  if (sizes.empty())
     return getAffineConstantExpr(0, context);
 
   assert(!exprs.empty() && "expected exprs");
@@ -959,9 +959,6 @@
   int64_t runningSize = 1;
   for (auto en : llvm::zip(llvm::reverse(exprs), llvm::reverse(sizes))) {
     int64_t size = std::get<1>(en);
-    // Degenerate case, no size =-> no stride
-    if (size == 0)
-      continue;
     AffineExpr dimExpr = std::get<0>(en);
     AffineExpr stride = dynamicPoisonBit
                             ? getAffineSymbolExpr(nSymbols++, context)
diff --git a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
--- a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
@@ -99,11 +99,11 @@
   // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.mlir.constant(4 : index) : i64
   // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: llvm.mlir.constant(0 : index) : i64
+  // CHECK: llvm.mlir.constant(1 : index) : i64
   // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.mlir.constant(0 : index) : i64
   // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: llvm.mlir.constant(0 : index) : i64
+  // CHECK: llvm.mlir.constant(4 : index) : i64
   // CHECK: = llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   %0 = memref.view %mem[%offset][] : memref<0xi8> to memref<0x4xf32>
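
Note (illustration, not part of the patch): with the size-0 special case removed, a zero-sized shape now gets ordinary strides instead of collapsing the whole layout to the constant 0, which is what the updated CHECK constants above encode for memref<0x4xf32> (strides [4, 1]). Below is a minimal sketch of the new behavior through MLIR's public C++ API; makeCanonicalStridedLayoutExpr and getAffineDimExpr are real entry points, but the standalone driver around them and the exact printed form of the result are assumptions.

```cpp
// Hypothetical driver, for illustration only: exercises
// makeCanonicalStridedLayoutExpr on the 0x4 shape from the test above.
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "llvm/ADT/SmallVector.h"

int main() {
  mlir::MLIRContext ctx;
  // Shape 0x4. Before this patch, the 0 size collapsed the layout to the
  // constant 0; after it, strides are computed as usual: [4, 1].
  llvm::SmallVector<int64_t> sizes = {0, 4};
  llvm::SmallVector<mlir::AffineExpr> exprs = {
      mlir::getAffineDimExpr(0, &ctx), mlir::getAffineDimExpr(1, &ctx)};
  mlir::AffineExpr layout =
      mlir::makeCanonicalStridedLayoutExpr(sizes, exprs, &ctx);
  layout.dump(); // canonical strided form, e.g. d0 * 4 + d1
  return 0;
}
```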