diff --git a/mlir/lib/Dialect/Vector/VectorOps.cpp b/mlir/lib/Dialect/Vector/VectorOps.cpp
--- a/mlir/lib/Dialect/Vector/VectorOps.cpp
+++ b/mlir/lib/Dialect/Vector/VectorOps.cpp
@@ -1483,6 +1483,10 @@
 }
 
 static LogicalResult verify(TypeCastOp op) {
+  MemRefType canonicalType = canonicalizeStridedLayout(op.getMemRefType());
+  if (!canonicalType.getAffineMaps().empty())
+    return op.emitOpError("expects operand to be a memref with no layout");
+
   auto resultType = inferVectorTypeCastResultType(op.getMemRefType());
   if (op.getResultMemRefType() != resultType)
     return op.emitOpError("expects result type to be: ") << resultType;
diff --git a/mlir/test/Dialect/Vector/invalid.mlir b/mlir/test/Dialect/Vector/invalid.mlir
--- a/mlir/test/Dialect/Vector/invalid.mlir
+++ b/mlir/test/Dialect/Vector/invalid.mlir
@@ -1046,3 +1046,10 @@
   // expected-error@+1 {{'vector.reduction' op unsupported reduction rank: 2}}
   %0 = vector.reduction "add", %arg0 : vector<4x16xf32> into f32
 }
+
+// -----
+
+func @type_cast_layout(%arg0: memref<4x3xf32, affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s0 + d1 * s1 + s2)>>) {
+  // expected-error@+1 {{expects operand to be a memref with no layout}}
+  %0 = vector.type_cast %arg0: memref<4x3xf32, affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s0 + d1 * s1 + s2)>> to memref<vector<4x3xf32>>
+}
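
For contrast with the rejected case above, here is a minimal sketch (not part of the patch; the function name is made up) of an operand the verifier continues to accept: a memref with no layout map, where only the existing result-type check applies.

    // Hypothetical example, not in the patch: the operand memref carries no
    // layout map, so the new check passes and the result type must match
    // inferVectorTypeCastResultType, i.e. memref<vector<4x3xf32>>.
    func @type_cast_no_layout(%arg0: memref<4x3xf32>) {
      %0 = vector.type_cast %arg0 : memref<4x3xf32> to memref<vector<4x3xf32>>
      return
    }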