diff --git a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
--- a/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
+++ b/mlir/include/mlir/Dialect/StandardOps/IR/Ops.td
@@ -2982,63 +2982,58 @@
     DeclareOpInterfaceMethods<ViewLikeOpInterface>, NoSideEffect]> {
   let summary = "memref view operation";
   let description = [{
-    The "view" operation converts a 1-D memref with i8 element type,
-    to an N-D memref with arbitrary element type. In addition, the ViewOp
-    supports the following arguments:
-    *) A single dynamic offset operand can be specified which represents a
-       a dynamic offset within the base 1-D memref at which to create the
-       resulting memref view.
-    *) A dynamic size operand must be specified for each dynamic dimension
+    The "view" operation extracts an N-D contiguous memref with empty layout
+    map and arbitrary element type from a 1-D contiguous memref with empty
+    layout map and i8 element type. The ViewOp supports the following
+    arguments:
+    *) A single dynamic byte-shift operand must be specified, which represents
+       a shift of the base 1-D memref pointer from which to create the
+       resulting contiguous memref view with identity layout.
+    *) A dynamic size operand must be specified for each dynamic dimension
       in the resulting view memref type.
+
+    The "view" operation gives a structured indexing form to a flat 1-D buffer.
+    Unlike "subview", it can perform a type change. The type change behavior
+    requires the op to have special semantics because, e.g., a byte shift of 3
+    cannot be represented as an offset on f64.
+    For now, a "view" op:
+    1) Only takes a contiguous source memref with 0 offset and empty layout.
+    2) Must specify a byte_shift operand (in the future, a special integer
+       attribute may be added to support the folded case).
+    3) Returns a contiguous memref with 0 offset and empty layout.
+
    Example:

    ```mlir
    // Allocate a flat 1D/i8 memref.
    %0 = alloc() : memref<2048xi8>

-    // ViewOp with static offset and sizes.
-    %1 = view %0[][] : memref<2048xi8> to memref<64x4xf32>
-
-    // ViewOp with dynamic offset and one dynamic size.
-    %2 = view %0[%offset_1024][%size0]
-      : memref<2048xi8> to memref<?x4xf32, (d0, d1)[s0] -> (d0 * 4 + d1 + s0)>
+    // ViewOp with dynamic offset and static sizes.
+    %1 = view %0[%offset_1024][] : memref<2048xi8> to memref<64x4xf32>

-    // ViewOp creating 3D shape where two of the dim sizes are dynamic.
-    // *) The dynamic offset specified in the ViewOp is applied to the
-    //    base 1-D memref, and is represented by the symbol 's0' in the
-    //    layout map of the ViewOp result memref type.
-    // *) The dynamic size for the second dimension induces a dynamic
-    //    stride for the first dimension, which is represented by the
-    //    symbol 's1' in the layout map of the ViewOp result memref type.
-    //    Note that this dynamic stride will be computed from the view
-    //    shape and dynamic sizes.
-    %3 = view %0[%offset_1024][%size0, %size1]
-      : memref<2048xi8> to memref<?x?x4xf32, (d0, d1, d2)[s0, s1] -> (d0 * s1 + d1 * 4 + d2 + s0)>
+    // ViewOp with dynamic offset and two dynamic sizes.
+    %2 = view %0[%offset_1024][%size0, %size1] :
+      memref<2048xi8> to memref<?x?x4xf32>
    ```
  }];

  let arguments = (ins MemRefRankOf<[I8], [1]>:$source,
-                   Variadic<Index>:$operands);
+                   Index:$byte_shift,
+                   Variadic<Index>:$sizes);
  let results = (outs AnyMemRef);

  let extraClassDeclaration = [{
    /// The result of a view is always a memref.
    MemRefType getType() { return getResult().getType().cast<MemRefType>(); }

-    /// Returns the dynamic offset for this view operation if specified.
-    /// Returns nullptr if no dynamic offset was specified.
- Value getDynamicOffset(); - - /// Returns the starting operand list position of the dynamic size operands. - unsigned getDynamicSizesOperandStart() { - return getDynamicOffset() == nullptr ? 1 : 2; - } - - /// Returns the dynamic sizes for this view operation. + /// Returns the dynamic sizes for this view operation. This is redundant + /// with `sizes` but needed in template implementations. More specifically: + /// ``` + /// template + /// bool isMemRefSizeValidSymbol(AnyMemRefDefOp memrefDefOp, unsigned index, + /// Region *region) + /// ``` operand_range getDynamicSizes() { - return {operand_begin() + getDynamicSizesOperandStart(), operand_end()}; + return {sizes().begin(), sizes().end()}; } }]; diff --git a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp --- a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp +++ b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp @@ -2676,35 +2676,31 @@ auto successStrides = getStridesAndOffset(viewMemRefType, strides, offset); if (failed(successStrides)) return op->emitWarning("cannot cast to non-strided shape"), failure(); + assert(offset == 0 && "expected offset to be 0"); // Create the descriptor. MemRefDescriptor sourceMemRef(adaptor.source()); auto targetMemRef = MemRefDescriptor::undef(rewriter, loc, targetDescTy); // Field 1: Copy the allocated pointer, used for malloc/free. - Value extracted = sourceMemRef.allocatedPtr(rewriter, loc); + Value allocatedPtr = sourceMemRef.allocatedPtr(rewriter, loc); Value bitcastPtr = rewriter.create( - loc, targetElementTy.getPointerTo(), extracted); + loc, targetElementTy.getPointerTo(), allocatedPtr); targetMemRef.setAllocatedPtr(rewriter, loc, bitcastPtr); // Field 2: Copy the actual aligned pointer to payload. - extracted = sourceMemRef.alignedPtr(rewriter, loc); + Value alignedPtr = sourceMemRef.alignedPtr(rewriter, loc); + alignedPtr = rewriter.create(loc, alignedPtr.getType(), + alignedPtr, adaptor.byte_shift()); bitcastPtr = rewriter.create( - loc, targetElementTy.getPointerTo(), extracted); + loc, targetElementTy.getPointerTo(), alignedPtr); targetMemRef.setAlignedPtr(rewriter, loc, bitcastPtr); - // Field 3: Copy the offset in aligned pointer. - unsigned numDynamicSizes = llvm::size(viewOp.getDynamicSizes()); - (void)numDynamicSizes; - bool hasDynamicOffset = offset == MemRefType::getDynamicStrideOrOffset(); - auto sizeAndOffsetOperands = adaptor.operands(); - assert(llvm::size(sizeAndOffsetOperands) == - numDynamicSizes + (hasDynamicOffset ? 1 : 0)); - Value baseOffset = !hasDynamicOffset - ? createIndexConstant(rewriter, loc, offset) - // TODO(ntv): better adaptor. - : sizeAndOffsetOperands.front(); - targetMemRef.setOffset(rewriter, loc, baseOffset); + // Field 3: The offset in the resulting type must be 0. This is because of + // the type change: an offset on srcType* may not be expressible as an + // offset on dstType*. + targetMemRef.setOffset(rewriter, loc, + createIndexConstant(rewriter, loc, offset)); // Early exit for 0-D corner case. if (viewMemRefType.getRank() == 0) @@ -2714,14 +2710,10 @@ if (strides.back() != 1) return op->emitWarning("cannot cast to non-contiguous shape"), failure(); Value stride = nullptr, nextSize = nullptr; - // Drop the dynamic stride from the operand list, if present. - ArrayRef sizeOperands(sizeAndOffsetOperands); - if (hasDynamicOffset) - sizeOperands = sizeOperands.drop_front(); for (int i = viewMemRefType.getRank() - 1; i >= 0; --i) { // Update size. 
Value size = - getSize(rewriter, loc, viewMemRefType.getShape(), sizeOperands, i); + getSize(rewriter, loc, viewMemRefType.getShape(), adaptor.sizes(), i); targetMemRef.setSize(rewriter, loc, i, size); // Update stride. stride = getStride(rewriter, loc, strides, nextSize, stride, i); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp --- a/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp @@ -169,7 +169,7 @@ dynamicBuffers, folder, alignment); auto fullLocalView = folded_std_view( folder, MemRefType::get(dynSizes, viewType.getElementType()), buffer, - fullSizes); + folded_std_constant_index(folder, 0), fullSizes); SmallVector zeros(fullSizes.size(), zero); SmallVector ones(fullSizes.size(), one); auto partialLocalView = diff --git a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp --- a/mlir/lib/Dialect/StandardOps/IR/Ops.cpp +++ b/mlir/lib/Dialect/StandardOps/IR/Ops.cpp @@ -2759,8 +2759,8 @@ parser.parseOperandList(offsetInfo, OpAsmParser::Delimiter::Square)) return failure(); - if (offsetInfo.size() > 1) - return parser.emitError(offsetLoc) << "expects 0 or 1 offset operand"; + if (offsetInfo.size() != 1) + return parser.emitError(offsetLoc) << "expects 1 offset operand"; return failure( parser.parseOperandList(sizesInfo, OpAsmParser::Delimiter::Square) || @@ -2775,44 +2775,15 @@ static void print(OpAsmPrinter &p, ViewOp op) { p << op.getOperationName() << ' ' << op.getOperand(0) << '['; - auto dynamicOffset = op.getDynamicOffset(); - if (dynamicOffset != nullptr) - p.printOperand(dynamicOffset); - p << "][" << op.getDynamicSizes() << ']'; + p.printOperand(op.byte_shift()); + p << "][" << op.sizes() << ']'; p.printOptionalAttrDict(op.getAttrs()); p << " : " << op.getOperand(0).getType() << " to " << op.getType(); } -Value ViewOp::getDynamicOffset() { - int64_t offset; - SmallVector strides; - auto result = - succeeded(mlir::getStridesAndOffset(getType(), strides, offset)); - assert(result); - if (result && offset == MemRefType::getDynamicStrideOrOffset()) - return getOperand(1); - return nullptr; -} - -static LogicalResult verifyDynamicStrides(MemRefType memrefType, - ArrayRef strides) { - unsigned rank = memrefType.getRank(); - assert(rank == strides.size()); - bool dynamicStrides = false; - for (int i = rank - 2; i >= 0; --i) { - // If size at dim 'i + 1' is dynamic, set the 'dynamicStrides' flag. - if (memrefType.isDynamicDim(i + 1)) - dynamicStrides = true; - // If stride at dim 'i' is not dynamic, return error. - if (dynamicStrides && strides[i] != MemRefType::getDynamicStrideOrOffset()) - return failure(); - } - return success(); -} - static LogicalResult verify(ViewOp op) { auto baseType = op.getOperand(0).getType().cast(); - auto viewType = op.getResult().getType().cast(); + auto viewType = op.getType(); // The base memref should have identity layout map (or none). if (baseType.getAffineMaps().size() > 1 || @@ -2820,32 +2791,24 @@ !baseType.getAffineMaps()[0].isIdentity())) return op.emitError("unsupported map for base memref type ") << baseType; + // The result memref should have identity layout map (or none). + if (viewType.getAffineMaps().size() > 1 || + (viewType.getAffineMaps().size() == 1 && + !viewType.getAffineMaps()[0].isIdentity())) + return op.emitError("unsupported map for result memref type ") << viewType; + // The base memref and the view memref should be in the same memory space. 
if (baseType.getMemorySpace() != viewType.getMemorySpace()) return op.emitError("different memory spaces specified for base memref " "type ") << baseType << " and view memref type " << viewType; - // Verify that the result memref type has a strided layout map. - int64_t offset; - SmallVector strides; - if (failed(getStridesAndOffset(viewType, strides, offset))) - return op.emitError("result type ") << viewType << " is not strided"; - - // Verify that we have the correct number of operands for the result type. - unsigned memrefOperandCount = 1; + // Verify that we have the correct number of sizes for the result type. unsigned numDynamicDims = viewType.getNumDynamicDims(); - unsigned dynamicOffsetCount = - offset == MemRefType::getDynamicStrideOrOffset() ? 1 : 0; - if (op.getNumOperands() != - memrefOperandCount + numDynamicDims + dynamicOffsetCount) - return op.emitError("incorrect number of operands for type ") << viewType; - - // Verify dynamic strides symbols were added to correct dimensions based - // on dynamic sizes. - if (failed(verifyDynamicStrides(viewType, strides))) - return op.emitError("incorrect dynamic strides in view memref type ") + if (op.sizes().size() != numDynamicDims) + return op.emitError("incorrect number of size operands for type ") << viewType; + return success(); } @@ -2866,42 +2829,23 @@ // Get result memref type. auto memrefType = viewOp.getType(); - if (memrefType.getAffineMaps().size() > 1) - return failure(); - auto map = memrefType.getAffineMaps().empty() - ? AffineMap::getMultiDimIdentityMap(memrefType.getRank(), - rewriter.getContext()) - : memrefType.getAffineMaps()[0]; // Get offset from old memref view type 'memRefType'. int64_t oldOffset; SmallVector oldStrides; if (failed(getStridesAndOffset(memrefType, oldStrides, oldOffset))) return failure(); + assert(oldOffset == 0 && "Expected 0 offset"); SmallVector newOperands; - // Fold dynamic offset operand if it is produced by a constant. - auto dynamicOffset = viewOp.getDynamicOffset(); - int64_t newOffset = oldOffset; - unsigned dynamicOffsetOperandCount = 0; - if (dynamicOffset != nullptr) { - auto *defOp = dynamicOffset.getDefiningOp(); - if (auto constantIndexOp = dyn_cast_or_null(defOp)) { - // Dynamic offset will be folded into the map. - newOffset = constantIndexOp.getValue(); - } else { - // Unable to fold dynamic offset. Add it to 'newOperands' list. - newOperands.push_back(dynamicOffset); - dynamicOffsetOperandCount = 1; - } - } + // Offset cannot be folded into result type. // Fold any dynamic dim operands which are produced by a constant. SmallVector newShapeConstants; newShapeConstants.reserve(memrefType.getRank()); - unsigned dynamicDimPos = viewOp.getDynamicSizesOperandStart(); + unsigned dynamicDimPos = 0; unsigned rank = memrefType.getRank(); for (unsigned dim = 0, e = rank; dim < e; ++dim) { int64_t dimSize = memrefType.getDimSize(dim); @@ -2910,46 +2854,29 @@ newShapeConstants.push_back(dimSize); continue; } - auto *defOp = viewOp.getOperand(dynamicDimPos).getDefiningOp(); + auto *defOp = viewOp.sizes()[dynamicDimPos].getDefiningOp(); if (auto constantIndexOp = dyn_cast_or_null(defOp)) { // Dynamic shape dimension will be folded. newShapeConstants.push_back(constantIndexOp.getValue()); } else { // Dynamic shape dimension not folded; copy operand from old memref. newShapeConstants.push_back(dimSize); - newOperands.push_back(viewOp.getOperand(dynamicDimPos)); + newOperands.push_back(viewOp.sizes()[dynamicDimPos]); } dynamicDimPos++; } - // Compute new strides based on 'newShapeConstants'. 
- SmallVector newStrides(rank); - newStrides[rank - 1] = 1; - bool dynamicStrides = false; - for (int i = rank - 2; i >= 0; --i) { - if (ShapedType::isDynamic(newShapeConstants[i + 1])) - dynamicStrides = true; - if (dynamicStrides) - newStrides[i] = MemRefType::getDynamicStrideOrOffset(); - else - newStrides[i] = newShapeConstants[i + 1] * newStrides[i + 1]; - } - - // Regenerate strided layout map with 'newStrides' and 'newOffset'. - map = makeStridedLinearLayoutMap(newStrides, newOffset, - rewriter.getContext()); - - // Create new memref type with constant folded dims and/or offset/strides. - MemRefType newMemRefType = MemRefType::Builder(memrefType) - .setShape(newShapeConstants) - .setAffineMaps({map}); - (void)dynamicOffsetOperandCount; // unused in opt mode - assert(static_cast(newOperands.size()) == - dynamicOffsetOperandCount + newMemRefType.getNumDynamicDims()); + // Create new memref type with constant folded dims. + MemRefType newMemRefType = + MemRefType::Builder(memrefType).setShape(newShapeConstants); + // Nothing new, don't fold. + if (newMemRefType == memrefType) + return failure(); // Create new ViewOp. auto newViewOp = rewriter.create(viewOp.getLoc(), newMemRefType, - viewOp.getOperand(0), newOperands); + viewOp.getOperand(0), + viewOp.byte_shift(), newOperands); // Insert a cast so we have the same type as the old memref type. rewriter.replaceOpWithNewOp(viewOp, newViewOp, viewOp.getType()); @@ -2972,7 +2899,7 @@ if (!allocOp) return failure(); rewriter.replaceOpWithNewOp(viewOp, viewOp.getType(), allocOperand, - viewOp.operands()); + viewOp.byte_shift(), viewOp.sizes()); return success(); } }; diff --git a/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir b/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir --- a/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir +++ b/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir @@ -773,43 +773,30 @@ // CHECK: llvm.mlir.undef : !llvm<"{ i8*, i8*, i64, [1 x i64], [1 x i64] }"> %0 = alloc() : memref<2048xi8> - // Test two dynamic sizes and dynamic offset. + // Test two dynamic sizes. 
// CHECK: llvm.mlir.undef : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.extractvalue %{{.*}}[1] : !llvm<"{ i8*, i8*, i64, [1 x i64], [1 x i64] }"> - // CHECK: llvm.bitcast %{{.*}} : !llvm<"i8*"> to !llvm<"float*"> - // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.insertvalue %[[ARG2]], %{{.*}}[2] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> + // CHECK: %[[BASE_PTR:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm<"{ i8*, i8*, i64, [1 x i64], [1 x i64] }"> + // CHECK: %[[SHIFTED_BASE_PTR:.*]] = llvm.getelementptr %[[BASE_PTR]][%[[ARG2]]] : (!llvm<"i8*">, !llvm.i64) -> !llvm<"i8*"> + // CHECK: %[[CAST_SHIFTED_BASE_PTR:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR]] : !llvm<"i8*"> to !llvm<"float*"> + // CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR]], %{{.*}}[1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> + // CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 + // CHECK: llvm.insertvalue %[[C0]], %{{.*}}[2] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> // CHECK: llvm.insertvalue %[[ARG1]], %{{.*}}[3, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> // CHECK: llvm.mlir.constant(1 : index) : !llvm.i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> // CHECK: llvm.insertvalue %[[ARG0]], %{{.*}}[3, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> // CHECK: llvm.mul %{{.*}}, %[[ARG1]] // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - %1 = view %0[%arg2][%arg0, %arg1] - : memref<2048xi8> to memref (d0 * s0 + d1 + s1)>> + %1 = view %0[%arg2][%arg0, %arg1] : memref<2048xi8> to memref - // Test two dynamic sizes and static offset. + // Test one dynamic size. // CHECK: llvm.mlir.undef : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.extractvalue %{{.*}}[1] : !llvm<"{ i8*, i8*, i64, [1 x i64], [1 x i64] }"> - // CHECK: llvm.bitcast %{{.*}} : !llvm<"i8*"> to !llvm<"float*"> - // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.mlir.constant(0 : index) : !llvm.i64 - // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.mlir.constant(1 : index) : !llvm.i64 - // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.insertvalue %arg0, %{{.*}}[3, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.mul %{{.*}}, %[[ARG1]] - // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - %2 = view %0[][%arg0, %arg1] - : memref<2048xi8> to memref (d0 * s0 + d1)>> - - // Test one dynamic size and dynamic offset. 
- // CHECK: llvm.mlir.undef : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.extractvalue %{{.*}}[1] : !llvm<"{ i8*, i8*, i64, [1 x i64], [1 x i64] }"> - // CHECK: llvm.bitcast %{{.*}} : !llvm<"i8*"> to !llvm<"float*"> - // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.insertvalue %[[ARG2]], %{{.*}}[2] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> + // CHECK: %[[BASE_PTR_2:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm<"{ i8*, i8*, i64, [1 x i64], [1 x i64] }"> + // CHECK: %[[SHIFTED_BASE_PTR_2:.*]] = llvm.getelementptr %[[BASE_PTR_2]][%[[ARG2]]] : (!llvm<"i8*">, !llvm.i64) -> !llvm<"i8*"> + // CHECK: %[[CAST_SHIFTED_BASE_PTR_2:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR_2]] : !llvm<"i8*"> to !llvm<"float*"> + // CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR_2]], %{{.*}}[1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> + // CHECK: %[[C0_2:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 + // CHECK: llvm.insertvalue %[[C0_2]], %{{.*}}[2] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> // CHECK: llvm.insertvalue %[[ARG1]], %{{.*}}[3, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> // CHECK: llvm.mlir.constant(1 : index) : !llvm.i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> @@ -817,33 +804,16 @@ // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> // CHECK: llvm.mul %{{.*}}, %[[ARG1]] // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - %3 = view %0[%arg2][%arg1] - : memref<2048xi8> to memref<4x?xf32, affine_map<(d0, d1)[s0, s1] -> (d0 * s0 + d1 + s1)>> - - // Test one dynamic size and static offset. - // CHECK: llvm.mlir.undef : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.extractvalue %{{.*}}[1] : !llvm<"{ i8*, i8*, i64, [1 x i64], [1 x i64] }"> - // CHECK: llvm.bitcast %{{.*}} : !llvm<"i8*"> to !llvm<"float*"> - // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.mlir.constant(0 : index) : !llvm.i64 - // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.mlir.constant(16 : index) : !llvm.i64 - // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.mlir.constant(1 : index) : !llvm.i64 - // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.insertvalue %[[ARG0]], %{{.*}}[3, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.mlir.constant(4 : index) : !llvm.i64 - // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - %4 = view %0[][%arg0] - : memref<2048xi8> to memref (d0 * 4 + d1)>> + %3 = view %0[%arg2][%arg1] : memref<2048xi8> to memref<4x?xf32> - // Test static sizes and static offset. + // Test static sizes. 
// CHECK: llvm.mlir.undef : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.extractvalue %{{.*}}[1] : !llvm<"{ i8*, i8*, i64, [1 x i64], [1 x i64] }"> - // CHECK: llvm.bitcast %{{.*}} : !llvm<"i8*"> to !llvm<"float*"> - // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.mlir.constant(0 : index) : !llvm.i64 - // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> + // CHECK: %[[BASE_PTR_3:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm<"{ i8*, i8*, i64, [1 x i64], [1 x i64] }"> + // CHECK: %[[SHIFTED_BASE_PTR_3:.*]] = llvm.getelementptr %[[BASE_PTR_3]][%[[ARG2]]] : (!llvm<"i8*">, !llvm.i64) -> !llvm<"i8*"> + // CHECK: %[[CAST_SHIFTED_BASE_PTR_3:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR_3]] : !llvm<"i8*"> to !llvm<"float*"> + // CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR_3]], %{{.*}}[1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> + // CHECK: %[[C0_3:.*]] = llvm.mlir.constant(0 : index) : !llvm.i64 + // CHECK: llvm.insertvalue %[[C0_3]], %{{.*}}[2] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> // CHECK: llvm.mlir.constant(4 : index) : !llvm.i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> // CHECK: llvm.mlir.constant(1 : index) : !llvm.i64 @@ -852,23 +822,7 @@ // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> // CHECK: llvm.mlir.constant(4 : index) : !llvm.i64 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - %5 = view %0[][] - : memref<2048xi8> to memref<64x4xf32, affine_map<(d0, d1) -> (d0 * 4 + d1)>> - - // Test dynamic everything. - // CHECK: llvm.mlir.undef : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.extractvalue %{{.*}}[1] : !llvm<"{ i8*, i8*, i64, [1 x i64], [1 x i64] }"> - // CHECK: llvm.bitcast %{{.*}} : !llvm<"i8*"> to !llvm<"float*"> - // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.insertvalue %[[ARG2]], %{{.*}}[2] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.insertvalue %[[ARG1]], %{{.*}}[3, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: %[[STRIDE_1:.*]] = llvm.mlir.constant(1 : index) : !llvm.i64 - // CHECK: llvm.insertvalue %[[STRIDE_1]], %{{.*}}[4, 1] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.insertvalue %[[ARG0]], %{{.*}}[3, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - // CHECK: llvm.mul %[[STRIDE_1]], %[[ARG1]] : !llvm.i64 - // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }"> - %6 = view %0[%arg2][%arg0, %arg1] - : memref<2048xi8> to memref (d0 * s0 + d1 + s1)>> + %5 = view %0[%arg2][] : memref<2048xi8> to memref<64x4xf32> return } diff --git a/mlir/test/Dialect/Linalg/affine.mlir b/mlir/test/Dialect/Linalg/affine.mlir --- a/mlir/test/Dialect/Linalg/affine.mlir +++ b/mlir/test/Dialect/Linalg/affine.mlir @@ -3,7 +3,6 @@ // Test that we can lower all the way to LLVM without crashing, don't check results here. 
// RUN: mlir-opt %s -convert-linalg-to-affine-loops -convert-linalg-to-llvm -o=/dev/null 2>&1 -// CHECK-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)> // CHECK-DAG: #[[strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)> // CHECK-DAG: #[[stride2Dilation1:.*]] = affine_map<(d0, d1) -> (d0 * 2 + d1)> @@ -13,10 +12,10 @@ func @matmul(%arg0: memref, %M: index, %N: index, %K: index) { %c0 = constant 0 : index %c1 = constant 1 : index - %A = view %arg0[%c0][%M, %K] : memref to memref - %B = view %arg0[%c0][%K, %N] : memref to memref - %C = view %arg0[%c0][%M, %N] : memref to memref - linalg.matmul(%A, %B, %C) : memref, memref, memref + %A = view %arg0[%c0][%M, %K] : memref to memref + %B = view %arg0[%c0][%K, %N] : memref to memref + %C = view %arg0[%c0][%M, %N] : memref to memref + linalg.matmul(%A, %B, %C) : memref, memref, memref return } @@ -24,18 +23,18 @@ // CHECK-SAME: [[M:arg[0-9]+]]: index // CHECK-SAME: [[N:arg[0-9]+]]: index // CHECK-SAME: [[K:arg[0-9]+]]: index -// CHECK: %[[A:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref -// CHECK: %[[B:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref -// CHECK: %[[C:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref +// CHECK: %[[A:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref +// CHECK: %[[B:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref +// CHECK: %[[C:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { // CHECK: affine.for %{{.*}} = 0 to %{{.*}} { -// CHECK-DAG: %[[a:.*]] = affine.load %[[A]][%{{.*}}, %{{.*}}] : memref -// CHECK-DAG: %[[b:.*]] = affine.load %[[B]][%{{.*}}, %{{.*}}] : memref +// CHECK-DAG: %[[a:.*]] = affine.load %[[A]][%{{.*}}, %{{.*}}] : memref +// CHECK-DAG: %[[b:.*]] = affine.load %[[B]][%{{.*}}, %{{.*}}] : memref // CHECK-DAG: %[[inc:.*]] = mulf %[[a]], %[[b]] : f32 -// CHECK-DAG: %[[c:.*]] = affine.load %[[C]][%{{.*}}, %{{.*}}] : memref +// CHECK-DAG: %[[c:.*]] = affine.load %[[C]][%{{.*}}, %{{.*}}] : memref // CHECK-DAG: %[[res:.*]] = addf %[[c]], %[[inc]] : f32 -// CHECK: affine.store %[[res]], %[[C]][%{{.*}}, %{{.*}}] : memref +// CHECK: affine.store %[[res]], %[[C]][%{{.*}}, %{{.*}}] : memref func @conv_view3(%arg0: memref, %arg1: memref, %arg2: memref) { linalg.conv(%arg0, %arg1, %arg2) {strides = [2]}: memref, memref, memref diff --git a/mlir/test/Dialect/Linalg/canonicalize.mlir b/mlir/test/Dialect/Linalg/canonicalize.mlir --- a/mlir/test/Dialect/Linalg/canonicalize.mlir +++ b/mlir/test/Dialect/Linalg/canonicalize.mlir @@ -7,7 +7,7 @@ %c8 = constant 8 : index %c16 = constant 16 : index %1 = alloc (%b) : memref - %2 = view %1[][] : memref to memref<16x16xf32> + %2 = view %1[%c0][] : memref to memref<16x16xf32> %3 = memref_cast %2 : memref<16x16xf32> to memref %r0 = linalg.range %c0:%c8:%c1 : !linalg.range diff --git a/mlir/test/Dialect/Linalg/loops.mlir b/mlir/test/Dialect/Linalg/loops.mlir --- a/mlir/test/Dialect/Linalg/loops.mlir +++ b/mlir/test/Dialect/Linalg/loops.mlir @@ -30,104 +30,104 @@ func @matmul(%arg0: memref, %M: index, %N: index, %K: index) { %c0 = constant 0 : index %c1 = constant 1 : index - %A = view %arg0[%c0][%M, %K] : memref to memref - %B = view %arg0[%c0][%K, %N] : memref to memref - %C = view %arg0[%c0][%M, %N] : memref to memref - linalg.matmul(%A, %B, %C) : memref, memref, memref + %A = view %arg0[%c0][%M, %K] : memref to memref + %B = view %arg0[%c0][%K, %N] : memref to memref + %C = view %arg0[%c0][%M, %N] : memref to 
memref + linalg.matmul(%A, %B, %C) : memref, memref, memref return } // CHECKLOOP-LABEL: func @matmul(%{{.*}}: memref, // CHECKLOOP-SAME: [[M:arg[0-9]+]]: index // CHECKLOOP-SAME: [[N:arg[0-9]+]]: index // CHECKLOOP-SAME: [[K:arg[0-9]+]]: index -// CHECKLOOP: %[[A:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref -// CHECKLOOP: %[[B:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref -// CHECKLOOP: %[[C:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref +// CHECKLOOP: %[[A:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref +// CHECKLOOP: %[[B:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref +// CHECKLOOP: %[[C:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref // CHECKLOOP: loop.for %{{.*}} = %{{.*}} to %[[M]] step %{{.*}} { // CHECKLOOP: loop.for %{{.*}} = %{{.*}} to %[[N]] step %{{.*}} { // CHECKLOOP: loop.for %{{.*}} = %{{.*}} to %[[K]] step %{{.*}} { -// CHECKLOOP-DAG: %[[a:.*]] = load %[[A]][%{{.*}}, %{{.*}}] : memref -// CHECKLOOP-DAG: %[[b:.*]] = load %[[B]][%{{.*}}, %{{.*}}] : memref +// CHECKLOOP-DAG: %[[a:.*]] = load %[[A]][%{{.*}}, %{{.*}}] : memref +// CHECKLOOP-DAG: %[[b:.*]] = load %[[B]][%{{.*}}, %{{.*}}] : memref // CHECKLOOP-DAG: %[[inc:.*]] = mulf %[[a]], %[[b]] : f32 -// CHECKLOOP-DAG: %[[c:.*]] = load %[[C]][%{{.*}}, %{{.*}}] : memref +// CHECKLOOP-DAG: %[[c:.*]] = load %[[C]][%{{.*}}, %{{.*}}] : memref // CHECKLOOP-DAG: %[[res:.*]] = addf %[[c]], %[[inc]] : f32 -// CHECKLOOP: store %[[res]], %[[C]][%{{.*}}, %{{.*}}] : memref +// CHECKLOOP: store %[[res]], %[[C]][%{{.*}}, %{{.*}}] : memref // CHECKPARALLEL-LABEL: func @matmul(%{{.*}}: memref, // CHECKPARALLEL-SAME: [[M:arg[0-9]+]]: index // CHECKPARALLEL-SAME: [[N:arg[0-9]+]]: index // CHECKPARALLEL-SAME: [[K:arg[0-9]+]]: index -// CHECKPARALLEL: %[[A:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref -// CHECKPARALLEL: %[[B:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref -// CHECKPARALLEL: %[[C:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref +// CHECKPARALLEL: %[[A:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref +// CHECKPARALLEL: %[[B:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref +// CHECKPARALLEL: %[[C:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref // CHECKPARALLEL: loop.parallel (%{{.*}}, %{{.*}}) = (%{{.*}}, %{{.*}}) to (%[[M]], %[[N]]) step (%{{.*}}, %{{.*}} { // CHECKPARALLEL: loop.for %{{.*}} = %{{.*}} to %[[K]] step %{{.*}} { -// CHECKPARALLEL-DAG: %[[a:.*]] = load %[[A]][%{{.*}}, %{{.*}}] : memref -// CHECKPARALLEL-DAG: %[[b:.*]] = load %[[B]][%{{.*}}, %{{.*}}] : memref +// CHECKPARALLEL-DAG: %[[a:.*]] = load %[[A]][%{{.*}}, %{{.*}}] : memref +// CHECKPARALLEL-DAG: %[[b:.*]] = load %[[B]][%{{.*}}, %{{.*}}] : memref // CHECKPARALLEL-DAG: %[[inc:.*]] = mulf %[[a]], %[[b]] : f32 -// CHECKPARALLEL-DAG: %[[c:.*]] = load %[[C]][%{{.*}}, %{{.*}}] : memref +// CHECKPARALLEL-DAG: %[[c:.*]] = load %[[C]][%{{.*}}, %{{.*}}] : memref // CHECKPARALLEL-DAG: %[[res:.*]] = addf %[[c]], %[[inc]] : f32 -// CHECKPARALLEL: store %[[res]], %[[C]][%{{.*}}, %{{.*}}] : memref +// CHECKPARALLEL: store %[[res]], %[[C]][%{{.*}}, %{{.*}}] : memref func @matvec(%arg0: memref, %M: index, %N: index) { %c0 = constant 0 : index %c1 = constant 1 : index - %2 = view %arg0[%c0][%M, %N] : memref to memref - %3 = view %arg0[%c0][%M] : memref to memref - %4 = view %arg0[%c0][%N] : memref to memref - linalg.matvec(%2, %3, %4) : memref, memref, memref + %2 = view %arg0[%c0][%M, %N] : memref to memref + %3 = view %arg0[%c0][%M] : memref to memref + %4 = view %arg0[%c0][%N] : memref to memref + linalg.matvec(%2, %3, %4) : memref, 
memref, memref return } // CHECKLOOP-LABEL: func @matvec(%{{.*}}: memref, // CHECKLOOP-SAME: [[M:arg[0-9]+]]: index // CHECKLOOP-SAME: [[K:arg[0-9]+]]: index -// CHECKLOOP: %[[A:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref -// CHECKLOOP: %[[B:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref -// CHECKLOOP: %[[C:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref +// CHECKLOOP: %[[A:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref +// CHECKLOOP: %[[B:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref +// CHECKLOOP: %[[C:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref // CHECKLOOP: loop.for %{{.*}} = %{{.*}} to %[[M]] step %{{.*}} { // CHECKLOOP: loop.for %{{.*}} = %{{.*}} to %[[K]] step %{{.*}} { -// CHECKLOOP-DAG: %[[a:.*]] = load %[[A]][%{{.*}}, %{{.*}}] : memref -// CHECKLOOP-DAG: %[[b:.*]] = load %[[B]][%{{.*}}] : memref +// CHECKLOOP-DAG: %[[a:.*]] = load %[[A]][%{{.*}}, %{{.*}}] : memref +// CHECKLOOP-DAG: %[[b:.*]] = load %[[B]][%{{.*}}] : memref // CHECKLOOP-DAG: %[[inc:.*]] = mulf %[[a]], %[[b]] : f32 -// CHECKLOOP-DAG: %[[c:.*]] = load %[[C]][%{{.*}}] : memref +// CHECKLOOP-DAG: %[[c:.*]] = load %[[C]][%{{.*}}] : memref // CHECKLOOP-DAG: %[[res:.*]] = addf %[[c]], %[[inc]] : f32 -// CHECKLOOP: store %[[res]], %[[C]][%{{.*}}] : memref +// CHECKLOOP: store %[[res]], %[[C]][%{{.*}}] : memref // CHECKPARALLEL-LABEL: func @matvec(%{{.*}}: memref, // CHECKPARALLEL-SAME: [[M:arg[0-9]+]]: index // CHECKPARALLEL-SAME: [[K:arg[0-9]+]]: index -// CHECKPARALLEL: %[[A:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref -// CHECKPARALLEL: %[[B:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref -// CHECKPARALLEL: %[[C:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref +// CHECKPARALLEL: %[[A:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref +// CHECKPARALLEL: %[[B:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref +// CHECKPARALLEL: %[[C:.*]] = std.view %{{.*}}[{{.*}}] : memref to memref // CHECKPARALLEL: loop.parallel (%{{.*}}) = (%{{.*}}) to (%[[M]]) step (%{{.*}}) { // CHECKPARALLEL: loop.for %{{.*}} = %{{.*}} to %[[K]] step %{{.*}} { -// CHECKPARALLEL-DAG: %[[a:.*]] = load %[[A]][%{{.*}}, %{{.*}}] : memref -// CHECKPARALLEL-DAG: %[[b:.*]] = load %[[B]][%{{.*}}] : memref +// CHECKPARALLEL-DAG: %[[a:.*]] = load %[[A]][%{{.*}}, %{{.*}}] : memref +// CHECKPARALLEL-DAG: %[[b:.*]] = load %[[B]][%{{.*}}] : memref // CHECKPARALLEL-DAG: %[[inc:.*]] = mulf %[[a]], %[[b]] : f32 -// CHECKPARALLEL-DAG: %[[c:.*]] = load %[[C]][%{{.*}}] : memref +// CHECKPARALLEL-DAG: %[[c:.*]] = load %[[C]][%{{.*}}] : memref // CHECKPARALLEL-DAG: %[[res:.*]] = addf %[[c]], %[[inc]] : f32 -// CHECKPARALLEL: store %[[res]], %[[C]][%{{.*}}] : memref +// CHECKPARALLEL: store %[[res]], %[[C]][%{{.*}}] : memref func @dot(%arg0: memref, %M: index) { %c0 = constant 0 : index %c1 = constant 1 : index - %1 = view %arg0[%c0][%M] : memref to memref - %2 = view %arg0[%c0][%M] : memref to memref - %3 = view %arg0[][] : memref to memref - linalg.dot(%1, %2, %3) : memref, memref, memref + %1 = view %arg0[%c0][%M] : memref to memref + %2 = view %arg0[%c0][%M] : memref to memref + %3 = view %arg0[%c0][] : memref to memref + linalg.dot(%1, %2, %3) : memref, memref, memref return } // CHECKLOOP-LABEL: func @dot(%{{.*}}: memref, // CHECKLOOP-SAME: [[K:arg[0-9]+]]: index -// CHECKLOOP: %[[A:.*]] = std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref -// CHECKLOOP: %[[B:.*]] = std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref -// CHECKLOOP: %[[C:.*]] = std.view %{{.*}}[][] : memref to memref +// CHECKLOOP: %[[A:.*]] = std.view 
%{{.*}}[{{.*}}][{{.*}}] : memref to memref +// CHECKLOOP: %[[B:.*]] = std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref +// CHECKLOOP: %[[C:.*]] = std.view %{{.*}}[{{.*}}][] : memref to memref // CHECKLOOP: loop.for %{{.*}} = %{{.*}} to %[[K]] step %{{.*}} { -// CHECKLOOP-DAG: %[[a:.*]] = load %[[A]][%{{.*}}] : memref -// CHECKLOOP-DAG: %[[b:.*]] = load %[[B]][%{{.*}}] : memref +// CHECKLOOP-DAG: %[[a:.*]] = load %[[A]][%{{.*}}] : memref +// CHECKLOOP-DAG: %[[b:.*]] = load %[[B]][%{{.*}}] : memref // CHECKLOOP-DAG: %[[inc:.*]] = mulf %[[a]], %[[b]] : f32 // CHECKLOOP-DAG: %[[c:.*]] = load %[[C]][] : memref // CHECKLOOP-DAG: %[[res:.*]] = addf %[[c]], %[[inc]] : f32 @@ -135,12 +135,12 @@ // CHECKPARALLEL-LABEL: func @dot(%{{.*}}: memref, // CHECKPARALLEL-SAME: [[K:arg[0-9]+]]: index -// CHECKPARALLEL: %[[A:.*]] = std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref -// CHECKPARALLEL: %[[B:.*]] = std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref -// CHECKPARALLEL: %[[C:.*]] = std.view %{{.*}}[][] : memref to memref +// CHECKPARALLEL: %[[A:.*]] = std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref +// CHECKPARALLEL: %[[B:.*]] = std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref +// CHECKPARALLEL: %[[C:.*]] = std.view %{{.*}}[{{.*}}][] : memref to memref // CHECKPARALLEL: loop.for %{{.*}} = %{{.*}} to %[[K]] step %{{.*}} { -// CHECKPARALLEL-DAG: %[[a:.*]] = load %[[A]][%{{.*}}] : memref -// CHECKPARALLEL-DAG: %[[b:.*]] = load %[[B]][%{{.*}}] : memref +// CHECKPARALLEL-DAG: %[[a:.*]] = load %[[A]][%{{.*}}] : memref +// CHECKPARALLEL-DAG: %[[b:.*]] = load %[[B]][%{{.*}}] : memref // CHECKPARALLEL-DAG: %[[inc:.*]] = mulf %[[a]], %[[b]] : f32 // CHECKPARALLEL-DAG: %[[c:.*]] = load %[[C]][] : memref // CHECKPARALLEL-DAG: %[[res:.*]] = addf %[[c]], %[[inc]] : f32 diff --git a/mlir/test/Dialect/Linalg/promote.mlir b/mlir/test/Dialect/Linalg/promote.mlir --- a/mlir/test/Dialect/Linalg/promote.mlir +++ b/mlir/test/Dialect/Linalg/promote.mlir @@ -1,7 +1,6 @@ // RUN: mlir-opt %s -linalg-promote-subviews | FileCheck %s // RUN: mlir-opt %s -linalg-promote-subviews="test-promote-dynamic" | FileCheck %s --check-prefix=DYNAMIC -#map0 = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)> #map1 = affine_map<(d0) -> (d0 + 2)> #map2 = affine_map<(d0) -> (d0 + 4)> #map3 = affine_map<(d0) -> (d0 + 3)> @@ -15,19 +14,19 @@ %c2 = constant 2 : index %c0 = constant 0 : index %c1 = constant 1 : index - %3 = view %A[%c0][%M, %K] : memref to memref - %4 = view %A[%c0][%K, %N] : memref to memref - %5 = view %A[%c0][%M, %N] : memref to memref - %6 = dim %3, 0 : memref - %7 = dim %3, 1 : memref - %8 = dim %4, 1 : memref + %3 = view %A[%c0][%M, %K] : memref to memref + %4 = view %A[%c0][%K, %N] : memref to memref + %5 = view %A[%c0][%M, %N] : memref to memref + %6 = dim %3, 0 : memref + %7 = dim %3, 1 : memref + %8 = dim %4, 1 : memref loop.for %arg4 = %c0 to %6 step %c2 { loop.for %arg5 = %c0 to %8 step %c3 { loop.for %arg6 = %c0 to %7 step %c4 { - %11 = std.subview %3[%arg4, %arg6][%c2, %c4][%c1, %c1] : memref to memref - %14 = std.subview %4[%arg6, %arg5][%c4, %c3][%c1, %c1] : memref to memref - %17 = std.subview %5[%arg4, %arg5][%c2, %c3][%c1, %c1] : memref to memref - linalg.matmul(%11, %14, %17) : memref, memref, memref + %11 = std.subview %3[%arg4, %arg6][%c2, %c4][] : memref to memref + %14 = std.subview %4[%arg6, %arg5][%c4, %c3][] : memref to memref + %17 = std.subview %5[%arg4, %arg5][%c2, %c3][] : memref to memref + linalg.matmul(%11, %14, %17) : memref, memref, memref } } } @@ -38,35 +37,35 @@ // CHECK: 
loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} { // CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} { // CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} { -// CHECK: %[[vA:.*]] = subview {{.*}} : memref -// CHECK: %[[vB:.*]] = subview {{.*}} : memref -// CHECK: %[[vC:.*]] = subview {{.*}} : memref +// CHECK: %[[vA:.*]] = subview {{.*}} : memref +// CHECK: %[[vB:.*]] = subview {{.*}} : memref +// CHECK: %[[vC:.*]] = subview {{.*}} : memref /// // CHECK: %[[tmpA:.*]] = alloc() : memref<32xi8> -// CHECK: %[[fullA:.*]] = std.view %[[tmpA]][][{{.*}}] : memref<32xi8> to memref -// DYNAMIC: std.view %{{.*}}[][{{.*}}] : memref to memref +// CHECK: %[[fullA:.*]] = std.view %[[tmpA]][{{.*}}][{{.*}}] : memref<32xi8> to memref +// DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref // CHECK: %[[partialA:.*]] = subview %[[fullA]]{{.*}} : memref to memref /// // CHECK: %[[tmpB:.*]] = alloc() : memref<48xi8> -// CHECK: %[[fullB:.*]] = std.view %[[tmpB]][][{{.*}}] : memref<48xi8> to memref -// DYNAMIC: std.view %{{.*}}[][{{.*}}] : memref to memref +// CHECK: %[[fullB:.*]] = std.view %[[tmpB]][{{.*}}][{{.*}}] : memref<48xi8> to memref +// DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref // CHECK: %[[partialB:.*]] = subview %[[fullB]]{{.*}} : memref to memref /// // CHECK: %[[tmpC:.*]] = alloc() : memref<24xi8> -// CHECK: %[[fullC:.*]] = std.view %[[tmpC]][][{{.*}}] : memref<24xi8> to memref -// DYNAMIC: std.view %{{.*}}[][{{.*}}] : memref to memref +// CHECK: %[[fullC:.*]] = std.view %[[tmpC]][{{.*}}][{{.*}}] : memref<24xi8> to memref +// DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref // CHECK: %[[partialC:.*]] = subview %[[fullC]]{{.*}} : memref to memref // CHECK: linalg.fill(%[[fullA]], {{.*}}) : memref, f32 // CHECK: linalg.fill(%[[fullB]], {{.*}}) : memref, f32 // CHECK: linalg.fill(%[[fullC]], {{.*}}) : memref, f32 -// CHECK: linalg.copy(%[[vA]], %[[partialA]]) : memref, memref -// CHECK: linalg.copy(%[[vB]], %[[partialB]]) : memref, memref -// CHECK: linalg.copy(%[[vC]], %[[partialC]]) : memref, memref +// CHECK: linalg.copy(%[[vA]], %[[partialA]]) : memref, memref +// CHECK: linalg.copy(%[[vB]], %[[partialB]]) : memref, memref +// CHECK: linalg.copy(%[[vC]], %[[partialC]]) : memref, memref // // CHECK: linalg.matmul(%[[fullA]], %[[fullB]], %[[fullC]]) : memref, memref, memref // -// CHECK: linalg.copy(%[[partialC]], %[[vC]]) : memref, memref +// CHECK: linalg.copy(%[[partialC]], %[[vC]]) : memref, memref // // CHECK: dealloc %[[tmpA]] : memref<32xi8> // CHECK: dealloc %[[tmpB]] : memref<48xi8> @@ -80,19 +79,19 @@ %c2 = constant 2 : index %c0 = constant 0 : index %c1 = constant 1 : index - %3 = view %A[%c0][%M, %K] : memref to memref - %4 = view %A[%c0][%K, %N] : memref to memref - %5 = view %A[%c0][%M, %N] : memref to memref - %6 = dim %3, 0 : memref - %7 = dim %3, 1 : memref - %8 = dim %4, 1 : memref + %3 = view %A[%c0][%M, %K] : memref to memref + %4 = view %A[%c0][%K, %N] : memref to memref + %5 = view %A[%c0][%M, %N] : memref to memref + %6 = dim %3, 0 : memref + %7 = dim %3, 1 : memref + %8 = dim %4, 1 : memref loop.for %arg4 = %c0 to %6 step %c2 { loop.for %arg5 = %c0 to %8 step %c3 { loop.for %arg6 = %c0 to %7 step %c4 { - %11 = std.subview %3[%arg4, %arg6][%c2, %c4][%c1, %c1] : memref to memref - %14 = std.subview %4[%arg6, %arg5][%c4, %c3][%c1, %c1] : memref to memref - %17 = std.subview %5[%arg4, %arg5][%c2, %c3][%c1, %c1] : memref to memref - linalg.matmul(%11, %14, %17) : memref, memref, memref + %11 = std.subview %3[%arg4, 
%arg6][%c2, %c4][] : memref to memref + %14 = std.subview %4[%arg6, %arg5][%c4, %c3][] : memref to memref + %17 = std.subview %5[%arg4, %arg5][%c2, %c3][] : memref to memref + linalg.matmul(%11, %14, %17) : memref, memref, memref } } } @@ -103,35 +102,35 @@ // CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} { // CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} { // CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} { -// CHECK: %[[vA_f64:.*]] = subview {{.*}} : memref -// CHECK: %[[vB_f64:.*]] = subview {{.*}} : memref -// CHECK: %[[vC_f64:.*]] = subview {{.*}} : memref +// CHECK: %[[vA_f64:.*]] = subview {{.*}} : memref +// CHECK: %[[vB_f64:.*]] = subview {{.*}} : memref +// CHECK: %[[vC_f64:.*]] = subview {{.*}} : memref /// // CHECK: %[[tmpA_f64:.*]] = alloc() : memref<64xi8> -// CHECK: %[[fullA_f64:.*]] = std.view %[[tmpA_f64]][][{{.*}}] : memref<64xi8> to memref -// DYNAMIC: std.view %{{.*}}[][{{.*}}] : memref to memref +// CHECK: %[[fullA_f64:.*]] = std.view %[[tmpA_f64]][{{.*}}][{{.*}}] : memref<64xi8> to memref +// DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref // CHECK: %[[partialA_f64:.*]] = subview %[[fullA_f64]][%{{.*}}, %{{.*}}] : memref to memref /// // CHECK: %[[tmpB_f64:.*]] = alloc() : memref<96xi8> -// CHECK: %[[fullB_f64:.*]] = std.view %[[tmpB_f64]][][{{.*}}] : memref<96xi8> to memref -// DYNAMIC: std.view %{{.*}}[][{{.*}}] : memref to memref +// CHECK: %[[fullB_f64:.*]] = std.view %[[tmpB_f64]][{{.*}}][{{.*}}] : memref<96xi8> to memref +// DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref // CHECK: %[[partialB_f64:.*]] = subview %[[fullB_f64]][%{{.*}}, %{{.*}}] : memref to memref /// // CHECK: %[[tmpC_f64:.*]] = alloc() : memref<48xi8> -// CHECK: %[[fullC_f64:.*]] = std.view %[[tmpC_f64]][][{{.*}}] : memref<48xi8> to memref -// DYNAMIC: std.view %{{.*}}[][{{.*}}] : memref to memref +// CHECK: %[[fullC_f64:.*]] = std.view %[[tmpC_f64]][{{.*}}][{{.*}}] : memref<48xi8> to memref +// DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref // CHECK: %[[partialC_f64:.*]] = subview %[[fullC_f64]][%{{.*}}, %{{.*}}] : memref to memref // CHECK: linalg.fill(%[[fullA_f64]], {{.*}}) : memref, f64 // CHECK: linalg.fill(%[[fullB_f64]], {{.*}}) : memref, f64 // CHECK: linalg.fill(%[[fullC_f64]], {{.*}}) : memref, f64 -// CHECK: linalg.copy(%[[vA_f64]], %[[partialA_f64]]) : memref, memref -// CHECK: linalg.copy(%[[vB_f64]], %[[partialB_f64]]) : memref, memref -// CHECK: linalg.copy(%[[vC_f64]], %[[partialC_f64]]) : memref, memref +// CHECK: linalg.copy(%[[vA_f64]], %[[partialA_f64]]) : memref, memref +// CHECK: linalg.copy(%[[vB_f64]], %[[partialB_f64]]) : memref, memref +// CHECK: linalg.copy(%[[vC_f64]], %[[partialC_f64]]) : memref, memref // // CHECK: linalg.matmul(%[[fullA_f64]], %[[fullB_f64]], %[[fullC_f64]]) : memref, memref, memref // -// CHECK: linalg.copy(%[[partialC_f64]], %[[vC_f64]]) : memref, memref +// CHECK: linalg.copy(%[[partialC_f64]], %[[vC_f64]]) : memref, memref // // CHECK: dealloc %[[tmpA_f64]] : memref<64xi8> // CHECK: dealloc %[[tmpB_f64]] : memref<96xi8> @@ -145,19 +144,19 @@ %c2 = constant 2 : index %c0 = constant 0 : index %c1 = constant 1 : index - %3 = view %A[%c0][%M, %K] : memref to memref - %4 = view %A[%c0][%K, %N] : memref to memref - %5 = view %A[%c0][%M, %N] : memref to memref - %6 = dim %3, 0 : memref - %7 = dim %3, 1 : memref - %8 = dim %4, 1 : memref + %3 = view %A[%c0][%M, %K] : memref to memref + %4 = view %A[%c0][%K, %N] : memref to memref + %5 = view %A[%c0][%M, %N] : memref to memref 
+ %6 = dim %3, 0 : memref + %7 = dim %3, 1 : memref + %8 = dim %4, 1 : memref loop.for %arg4 = %c0 to %6 step %c2 { loop.for %arg5 = %c0 to %8 step %c3 { loop.for %arg6 = %c0 to %7 step %c4 { - %11 = std.subview %3[%arg4, %arg6][%c2, %c4][%c1, %c1] : memref to memref - %14 = std.subview %4[%arg6, %arg5][%c4, %c3][%c1, %c1] : memref to memref - %17 = std.subview %5[%arg4, %arg5][%c2, %c3][%c1, %c1] : memref to memref - linalg.matmul(%11, %14, %17) : memref, memref, memref + %11 = std.subview %3[%arg4, %arg6][%c2, %c4][] : memref to memref + %14 = std.subview %4[%arg6, %arg5][%c4, %c3][] : memref to memref + %17 = std.subview %5[%arg4, %arg5][%c2, %c3][] : memref to memref + linalg.matmul(%11, %14, %17) : memref, memref, memref } } } @@ -168,35 +167,35 @@ // CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} { // CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} { // CHECK: loop.for %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} { -// CHECK: %[[vA_i32:.*]] = subview {{.*}} : memref -// CHECK: %[[vB_i32:.*]] = subview {{.*}} : memref -// CHECK: %[[vC_i32:.*]] = subview {{.*}} : memref +// CHECK: %[[vA_i32:.*]] = subview {{.*}} : memref +// CHECK: %[[vB_i32:.*]] = subview {{.*}} : memref +// CHECK: %[[vC_i32:.*]] = subview {{.*}} : memref /// // CHECK: %[[tmpA_i32:.*]] = alloc() : memref<32xi8> -// CHECK: %[[fullA_i32:.*]] = std.view %[[tmpA_i32]][][{{.*}}] : memref<32xi8> to memref -// DYNAMIC: std.view %{{.*}}[][{{.*}}] : memref to memref +// CHECK: %[[fullA_i32:.*]] = std.view %[[tmpA_i32]][{{.*}}][{{.*}}] : memref<32xi8> to memref +// DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref // CHECK: %[[partialA_i32:.*]] = subview %[[fullA_i32]][%{{.*}}, %{{.*}}] : memref to memref /// // CHECK: %[[tmpB_i32:.*]] = alloc() : memref<48xi8> -// CHECK: %[[fullB_i32:.*]] = std.view %[[tmpB_i32]][][{{.*}}] : memref<48xi8> to memref -// DYNAMIC: std.view %{{.*}}[][{{.*}}] : memref to memref +// CHECK: %[[fullB_i32:.*]] = std.view %[[tmpB_i32]][{{.*}}][{{.*}}] : memref<48xi8> to memref +// DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref // CHECK: %[[partialB_i32:.*]] = subview %[[fullB_i32]][%{{.*}}, %{{.*}}] : memref to memref /// // CHECK: %[[tmpC_i32:.*]] = alloc() : memref<24xi8> -// CHECK: %[[fullC_i32:.*]] = std.view %[[tmpC_i32]][][{{.*}}] : memref<24xi8> to memref -// DYNAMIC: std.view %{{.*}}[][{{.*}}] : memref to memref +// CHECK: %[[fullC_i32:.*]] = std.view %[[tmpC_i32]][{{.*}}][{{.*}}] : memref<24xi8> to memref +// DYNAMIC: std.view %{{.*}}[{{.*}}][{{.*}}] : memref to memref // CHECK: %[[partialC_i32:.*]] = subview %[[fullC_i32]][%{{.*}}, %{{.*}}] : memref to memref // CHECK: linalg.fill(%[[fullA_i32]], {{.*}}) : memref, i32 // CHECK: linalg.fill(%[[fullB_i32]], {{.*}}) : memref, i32 // CHECK: linalg.fill(%[[fullC_i32]], {{.*}}) : memref, i32 -// CHECK: linalg.copy(%[[vA_i32]], %[[partialA_i32]]) : memref, memref -// CHECK: linalg.copy(%[[vB_i32]], %[[partialB_i32]]) : memref, memref -// CHECK: linalg.copy(%[[vC_i32]], %[[partialC_i32]]) : memref, memref +// CHECK: linalg.copy(%[[vA_i32]], %[[partialA_i32]]) : memref, memref +// CHECK: linalg.copy(%[[vB_i32]], %[[partialB_i32]]) : memref, memref +// CHECK: linalg.copy(%[[vC_i32]], %[[partialC_i32]]) : memref, memref // // CHECK: linalg.matmul(%[[fullA_i32]], %[[fullB_i32]], %[[fullC_i32]]) : memref, memref, memref // -// CHECK: linalg.copy(%[[partialC_i32]], %[[vC_i32]]) : memref, memref +// CHECK: linalg.copy(%[[partialC_i32]], %[[vC_i32]]) : memref, memref // // CHECK: dealloc %[[tmpA_i32]] : memref<32xi8> 
// CHECK: dealloc %[[tmpB_i32]] : memref<48xi8> diff --git a/mlir/test/Dialect/Linalg/roundtrip.mlir b/mlir/test/Dialect/Linalg/roundtrip.mlir --- a/mlir/test/Dialect/Linalg/roundtrip.mlir +++ b/mlir/test/Dialect/Linalg/roundtrip.mlir @@ -16,34 +16,31 @@ // ----- // CHECK-DAG: #[[strided1D:.*]] = affine_map<(d0)[s0] -> (d0 + s0)> -// CHECK-DAG: #[[strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)> func @views(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index) { %c0 = constant 0 : index %0 = muli %arg0, %arg0 : index %1 = alloc (%0) : memref %2 = linalg.range %arg0:%arg1:%arg2 : !linalg.range - %3 = view %1[%c0][%arg0, %arg0] : - memref to memref + %3 = view %1[%c0][%arg0, %arg0] : memref to memref %4 = linalg.slice %3[%2, %2] : - memref, + memref, !linalg.range, !linalg.range, - memref - %5 = linalg.slice %3[%2, %arg2] : memref, + memref + %5 = linalg.slice %3[%2, %arg2] : memref, !linalg.range, index, memref - %6 = linalg.slice %3[%arg2, %2] : memref, + %6 = linalg.slice %3[%arg2, %2] : memref, index, !linalg.range, memref - %7 = linalg.slice %3[%arg2, %arg3] : memref, + %7 = linalg.slice %3[%arg2, %arg3] : memref, index, index, memref - %8 = view %1[%c0][%arg0, %arg0] : - memref to memref, offset: ?, strides: [?, 1]> + %8 = view %1[%c0][%arg0, %arg0] : memref to memref> dealloc %1 : memref return } @@ -52,29 +49,29 @@ // CHECK-NEXT: alloc(%{{.*}}) : memref // CHECK-NEXT: range // CHECK-NEXT: std.view %{{.*}}[%{{.*}}][%{{.*}}] : -// CHECK-SAME: memref to memref +// CHECK-SAME: memref to memref // CHECK-NEXT: linalg.slice %{{.*}}[%{{.*}}, %{{.*}}] : -// CHECK-SAME: memref, +// CHECK-SAME: memref, // CHECK-SAME: !linalg.range, // CHECK-SAME: !linalg.range, -// CHECK-SAME: memref +// CHECK-SAME: memref // CHECK-NEXT: linalg.slice %{{.*}}[%{{.*}}, %{{.*}}] : -// CHECK-SAME: memref, +// CHECK-SAME: memref, // CHECK-SAME: !linalg.range, // CHECK-SAME: index, // CHECK-SAME: memref // CHECK-NEXT: linalg.slice %{{.*}}[%{{.*}}, %{{.*}}] : -// CHECK-SAME: memref, +// CHECK-SAME: memref, // CHECK-SAME: index, // CHECK-SAME: !linalg.range, // CHECK-SAME: memref // CHECK-NEXT: linalg.slice %{{.*}}[%{{.*}}, %{{.*}}] : -// CHECK-SAME: memref, +// CHECK-SAME: memref, // CHECK-SAME: index, // CHECK-SAME: index, // CHECK-SAME: memref // CHECK-NEXT: view %{{.*}}[%{{.*}}][%{{.*}}] : -// CHECK-SAME: memref to memref, #[[strided2D]]> +// CHECK-SAME: memref to memref> // CHECK-NEXT: dealloc %{{.*}} : memref // ----- diff --git a/mlir/test/Dialect/Linalg/transform-patterns.mlir b/mlir/test/Dialect/Linalg/transform-patterns.mlir --- a/mlir/test/Dialect/Linalg/transform-patterns.mlir +++ b/mlir/test/Dialect/Linalg/transform-patterns.mlir @@ -280,13 +280,13 @@ // CHECK: %[[s1:.*]] = subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref // CHECK: %[[s2:.*]] = subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref // CHECK: %[[a0:.*]] = alloc({{%.*}}) : memref -// CHECK: %[[v0:.*]] = std.view %[[a0]][][{{%.*}}, {{%.*}}] : memref to memref +// CHECK: %[[v0:.*]] = std.view %[[a0]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref // CHECK: %[[l0:.*]] = subview %[[v0]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref // CHECK: %[[a1:.*]] = alloc({{%.*}}) : memref -// CHECK: %[[v1:.*]] = std.view %[[a1]][][{{%.*}}, {{%.*}}] : memref to memref +// CHECK: %[[v1:.*]] = std.view %[[a1]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref // CHECK: %[[l1:.*]] = subview %[[v1]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref 
to memref // CHECK: %[[a2:.*]] = alloc({{%.*}}) : memref -// CHECK: %[[v2:.*]] = std.view %[[a2]][][{{%.*}}, {{%.*}}] : memref to memref +// CHECK: %[[v2:.*]] = std.view %[[a2]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref // CHECK: %[[l2:.*]] = subview %[[v2]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref // CHECK: linalg.copy(%[[s0]], %[[l0]]) : memref, memref // CHECK: linalg.copy(%[[s1]], %[[l1]]) : memref, memref @@ -330,13 +330,13 @@ // CHECK: %[[s1:.*]] = subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref // CHECK: %[[s2:.*]] = subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref // CHECK: %[[a0:.*]] = alloc({{%.*}}) : memref -// CHECK: %[[v0:.*]] = std.view %[[a0]][][{{%.*}}, {{%.*}}] : memref to memref +// CHECK: %[[v0:.*]] = std.view %[[a0]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref // CHECK: %[[l0:.*]] = subview %[[v0]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref // CHECK-NOT: %[[a1:.*]] = alloc({{%.*}}) : memref -// CHECK-NOT: %[[v1:.*]] = std.view %[[a1]][][{{%.*}}, {{%.*}}] : memref to memref +// CHECK-NOT: %[[v1:.*]] = std.view %[[a1]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref // CHECK-NOT: %[[l0:.*]] = subview %[[v1]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref // CHECK-NOT: %[[a2:.*]] = alloc({{%.*}}) : memref -// CHECK-NOT: %[[v2:.*]] = std.view %[[a2]][][{{%.*}}, {{%.*}}] : memref to memref +// CHECK-NOT: %[[v2:.*]] = std.view %[[a2]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref // CHECK-NOT: %[[l0:.*]] = subview %[[v2]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref // CHECK: linalg.copy(%[[s0]], %[[l0]]) : memref, memref // CHECK-NOT: linalg.copy(%[[s1]], %[[l1]]) : memref, memref @@ -359,7 +359,7 @@ // CHECK: %[[cf:.*]] = constant {{.*}} : f32 // CHECK: %[[s0:.*]] = subview {{%.*}}[{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref // CHECK: %[[a0:.*]] = alloc({{%.*}}) {alignment = 32 : i64} : memref -// CHECK: %[[v0:.*]] = std.view %[[a0]][][{{%.*}}, {{%.*}}] : memref to memref +// CHECK: %[[v0:.*]] = std.view %[[a0]][{{.*}}][{{%.*}}, {{%.*}}] : memref to memref // CHECK: %[[l0:.*]] = subview %[[v0]][{{%.*}}, {{%.*}}] [{{%.*}}, {{%.*}}] : memref to memref // CHECK: linalg.fill(%[[v0]], {{%.*}}) : memref, f32 // CHECK: linalg.copy(%[[s0]], %[[l0]]) : memref, memref diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir --- a/mlir/test/IR/core-ops.mlir +++ b/mlir/test/IR/core-ops.mlir @@ -8,10 +8,6 @@ // CHECK: #map1 = affine_map<()[s0] -> (s0 + 1)> -// CHECK-DAG: #[[VIEW_MAP1:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1)> -// CHECK-DAG: #[[VIEW_MAP2:map[0-9]+]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)> -// CHECK-DAG: #[[VIEW_MAP3:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)> - // CHECK-DAG: #[[BASE_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)> // CHECK-DAG: #[[BASE_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3)> // CHECK-DAG: #[[SUBVIEW_MAP0:map[0-9]+]] = affine_map<(d0, d1, d2)[s0, s1, s2, s3] -> (d0 * s1 + d1 * s2 + d2 * s3 + s0)> @@ -692,29 +688,17 @@ func @memref_view(%arg0 : index, %arg1 : index, %arg2 : index) { %0 = alloc() : memref<2048xi8> // Test two dynamic sizes and dynamic offset. 
- // CHECK: %{{.*}} = std.view %0[%arg2][%arg0, %arg1] : memref<2048xi8> to memref - %1 = view %0[%arg2][%arg0, %arg1] - : memref<2048xi8> to memref (d0 * s1 + d1 + s0)>> - - // Test two dynamic sizes and static offset. - // CHECK: %{{.*}} = std.view %0[][%arg0, %arg1] : memref<2048xi8> to memref - %2 = view %0[][%arg0, %arg1] - : memref<2048xi8> to memref (d0 * s0 + d1)>> + // CHECK: %{{.*}} = std.view %0[%arg2][%arg0, %arg1] : memref<2048xi8> to memref + %1 = view %0[%arg2][%arg0, %arg1] : memref<2048xi8> to memref // Test one dynamic size and dynamic offset. - // CHECK: %{{.*}} = std.view %0[%arg2][%arg1] : memref<2048xi8> to memref<4x?xf32, #[[VIEW_MAP2]]> - %3 = view %0[%arg2][%arg1] - : memref<2048xi8> to memref<4x?xf32, affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)>> - - // Test one dynamic size and static offset. - // CHECK: %{{.*}} = std.view %0[][%arg0] : memref<2048xi8> to memref - %4 = view %0[][%arg0] - : memref<2048xi8> to memref (d0 * 4 + d1)>> + // CHECK: %{{.*}} = std.view %0[%arg2][%arg1] : memref<2048xi8> to memref<4x?xf32> + %3 = view %0[%arg2][%arg1] : memref<2048xi8> to memref<4x?xf32> // Test static sizes and static offset. - // CHECK: %{{.*}} = std.view %0[][] : memref<2048xi8> to memref<64x4xf32, #[[VIEW_MAP1]]> - %5 = view %0[][] - : memref<2048xi8> to memref<64x4xf32, affine_map<(d0, d1) -> (d0 * 4 + d1)>> + // CHECK: %{{.*}} = std.view %0[{{.*}}][] : memref<2048xi8> to memref<64x4xf32> + %c0 = constant 0: index + %5 = view %0[%c0][] : memref<2048xi8> to memref<64x4xf32> return } diff --git a/mlir/test/IR/invalid-ops.mlir b/mlir/test/IR/invalid-ops.mlir --- a/mlir/test/IR/invalid-ops.mlir +++ b/mlir/test/IR/invalid-ops.mlir @@ -927,29 +927,9 @@ func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) { %0 = alloc() : memref<2048xi8> - // expected-error@+1 {{incorrect number of operands for type}} + // expected-error@+1 {{expects 1 offset operand}} %1 = view %0[][%arg0, %arg1] - : memref<2048xi8> to memref (d0 * 4 + d1 + s0)>> - return -} - -// ----- - -func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) { - %0 = alloc() : memref<2048xi8> - // expected-error@+1 {{is not strided}} - %1 = view %0[][%arg0, %arg1] - : memref<2048xi8> to memref (d0, d1, s0)>> - return -} - -// ----- - -func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) { - %0 = alloc() : memref<2048xf32> - // expected-error@+1 {{must be 1D memref of 8-bit signless integer values}} - %1 = view %0[][%arg0, %arg1] - : memref<2048xf32> to memref (d0 * 4 + d1 + s0)>> + : memref<2048xi8> to memref return } @@ -957,8 +937,8 @@ func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) { %0 = alloc() : memref<2048xi8, affine_map<(d0) -> (d0 floordiv 8, d0 mod 8)>> - // expected-error@+1 {{unsupported map for base memref}} - %1 = view %0[][%arg0, %arg1] + // expected-error@+1 {{unsupported map for base memref type}} + %1 = view %0[%arg2][%arg0, %arg1] : memref<2048xi8, affine_map<(d0) -> (d0 floordiv 8, d0 mod 8)>> to memref (d0 * 4 + d1 + s0)>> return @@ -967,44 +947,29 @@ // ----- func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) { - %0 = alloc() : memref<2048xi8, 2> - // expected-error@+1 {{different memory spaces}} - %1 = view %0[][%arg0, %arg1] - : memref<2048xi8, 2> to - memref (d0 * 4 + d1 + s0)>, 1> - return -} - -// ----- - -func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) { %0 = alloc() : memref<2048xi8> - // expected-error@+1 {{incorrect dynamic strides}} - %1 = view %0[][%arg0, %arg1] - : memref<2048xi8> to - memref (d0 * 777 + 
d1 * 4 + d2)>> + // expected-error@+1 {{unsupported map for result memref type}} + %1 = view %0[%arg2][%arg0, %arg1] + : memref<2048xi8> to memref (d0, d1, s0)>> return } // ----- func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) { - %0 = alloc() : memref<2048xi8> - // expected-error@+1 {{incorrect dynamic strides}} - %1 = view %0[%arg0][] - : memref<2048xi8> to - memref<16x4x?xf32, affine_map<(d0, d1, d2) -> (d0 * 777 + d1 * 4 + d2)>> + %0 = alloc() : memref<2048xi8, 2> + // expected-error@+1 {{different memory spaces}} + %1 = view %0[%arg2][%arg0, %arg1] : memref<2048xi8, 2> to memref return } // ----- -func @multiple_offsets(%arg0: index) { +func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) { %0 = alloc() : memref<2048xi8> - // expected-error@+1 {{expects 0 or 1 offset operand}} - %1 = view %0[%arg0, %arg0][%arg0] - : memref<2048xi8> to - memref (d0 * 777 + d1 * 4 + d2)>> + // expected-error@+1 {{incorrect number of size operands for type}} + %1 = view %0[%arg2][%arg0] + : memref<2048xi8> to memref return } diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir --- a/mlir/test/Transforms/canonicalize.mlir +++ b/mlir/test/Transforms/canonicalize.mlir @@ -445,9 +445,9 @@ %ub = dim %3, 0 : memref affine.for %arg4 = 0 to %ub { %s = dim %0, 0 : memref - %v = std.view %3[%c0][%arg4, %s] : memref to memref + %v = std.view %3[%c0][%arg4, %s] : memref to memref %sv = subview %0[%c0, %c0][%s,%arg4][%c1,%c1] : memref to memref - %l = dim %v, 1 : memref + %l = dim %v, 1 : memref %u = dim %sv, 0 : memref affine.for %arg5 = %l to %u { "foo"() : () -> () @@ -462,13 +462,13 @@ // CHECK-NEXT: } // CHECK-NEXT: } - %A = view %BUF[%c0][%M, %K] : memref to memref - %B = view %BUF[%c0][%K, %N] : memref to memref - %C = view %BUF[%c0][%M, %N] : memref to memref + %A = view %BUF[%c0][%M, %K] : memref to memref + %B = view %BUF[%c0][%K, %N] : memref to memref + %C = view %BUF[%c0][%M, %N] : memref to memref - %M_ = dim %A, 0 : memref - %K_ = dim %A, 1 : memref - %N_ = dim %C, 1 : memref + %M_ = dim %A, 0 : memref + %K_ = dim %A, 1 : memref + %N_ = dim %C, 1 : memref loop.for %i = %c0 to %M_ step %c1 { loop.for %j = %c0 to %N_ step %c1 { loop.for %k = %c0 to %K_ step %c1 { @@ -642,19 +642,9 @@ // ----- -#TEST_VIEW_MAP0 = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + d1 + s0)> -#TEST_VIEW_MAP1 = affine_map<(d0, d1, d2)[s0, s1] -> (d0 * s1 + d1 * s0 + d2)> -#TEST_VIEW_MAP2 = affine_map<(d0, d1)[s0] -> (d0 * 4 + d1 + s0)> - -// CHECK-DAG: #[[VIEW_MAP0:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 11 + d1 + 15)> -// CHECK-DAG: #[[VIEW_MAP1:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * 11 + s0 + d1)> -// CHECK-DAG: #[[VIEW_MAP2:map[0-9]+]] = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1 + 15)> -// CHECK-DAG: #[[VIEW_MAP3:map[0-9]+]] = affine_map<(d0, d1, d2)[s0] -> (d0 * s0 + d1 * 7 + d2)> -// CHECK-DAG: #[[VIEW_MAP4:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 4 + d1 + 15)> -// CHECK-DAG: #[[VIEW_MAP5:map[0-9]+]] = affine_map<(d0, d1) -> (d0 * 7 + d1)> - // CHECK-LABEL: func @view -func @view(%arg0 : index) -> (f32, f32, f32, f32, f32, f32) { +func @view(%arg0 : index) -> (f32, f32, f32, f32) { + // CHECK: %[[C15:.*]] = constant 15 : index // CHECK: %[[ALLOC_MEM:.*]] = alloc() : memref<2048xi8> %0 = alloc() : memref<2048xi8> %c0 = constant 0 : index @@ -662,45 +652,27 @@ %c11 = constant 11 : index %c15 = constant 15 : index - // Test: fold constant sizes and offset, update map with static stride/offset. 
-  // CHECK: std.view %[[ALLOC_MEM]][][] : memref<2048xi8> to memref<7x11xf32, #[[VIEW_MAP0]]>
-  %1 = view %0[%c15][%c7, %c11]
-    : memref<2048xi8> to memref<?x?xf32, #TEST_VIEW_MAP0>
-  %r0 = load %1[%c0, %c0] : memref<?x?xf32, #TEST_VIEW_MAP0>
-
-  // Test: fold constant sizes but not offset, update map with static stride.
-  // Test that we do not a fold dynamic dim which is not produced by a constant.
-  // CHECK: std.view %[[ALLOC_MEM]][%arg0][] : memref<2048xi8> to memref<7x11xf32, #[[VIEW_MAP1]]>
-  %2 = view %0[%arg0][%c7, %c11]
-    : memref<2048xi8> to memref<?x?xf32, #TEST_VIEW_MAP0>
-  %r1 = load %2[%c0, %c0] : memref<?x?xf32, #TEST_VIEW_MAP0>
-
-  // Test: fold constant offset but not sizes, update map with constant offset.
-  // Test that we fold constant offset but not dynamic dims.
-  // CHECK: std.view %[[ALLOC_MEM]][][%arg0, %arg0] : memref<2048xi8> to memref<?x?xf32, #[[VIEW_MAP2]]>
-  %3 = view %0[%c15][%arg0, %arg0]
-    : memref<2048xi8> to memref<?x?xf32, #TEST_VIEW_MAP0>
-  %r2 = load %3[%c0, %c0] : memref<?x?xf32, #TEST_VIEW_MAP0>
-
-  // Test: fold one constant dim, no offset, should update with constant
-  // stride on dim 1, but leave dynamic stride on dim 0.
-  // CHECK: std.view %[[ALLOC_MEM]][][%arg0, %arg0] : memref<2048xi8> to memref<?x?x7xf32, #[[VIEW_MAP3]]>
-  %4 = view %0[][%arg0, %arg0, %c7]
-    : memref<2048xi8> to memref<?x?x?xf32, #TEST_VIEW_MAP1>
-  %r3 = load %4[%c0, %c0, %c0] : memref<?x?x?xf32, #TEST_VIEW_MAP1>
-
-  // Test: preserve an existing static dim size while folding a dynamic
-  // dimension and offset.
-  // CHECK: std.view %[[ALLOC_MEM]][][] : memref<2048xi8> to memref<7x4xf32, #[[VIEW_MAP4]]>
-  %5 = view %0[%c15][%c7] : memref<2048xi8> to memref<?x4xf32, #TEST_VIEW_MAP2>
-  %r4 = load %5[%c0, %c0] : memref<?x4xf32, #TEST_VIEW_MAP2>
+  // Test: fold constant sizes.
+  // CHECK: std.view %[[ALLOC_MEM]][%[[C15]]][] : memref<2048xi8> to memref<7x11xf32>
+  %1 = view %0[%c15][%c7, %c11] : memref<2048xi8> to memref<?x?xf32>
+  %r0 = load %1[%c0, %c0] : memref<?x?xf32>
+
+  // Test: fold one constant size.
+  // CHECK: std.view %[[ALLOC_MEM]][%[[C15]]][%arg0, %arg0] : memref<2048xi8> to memref<?x?x7xf32>
+  %2 = view %0[%c15][%arg0, %arg0, %c7] : memref<2048xi8> to memref<?x?x?xf32>
+  %r1 = load %2[%c0, %c0, %c0] : memref<?x?x?xf32>
+
+  // Test: preserve an existing static size.
+  // CHECK: std.view %[[ALLOC_MEM]][%[[C15]]][] : memref<2048xi8> to memref<7x4xf32>
+  %3 = view %0[%c15][%c7] : memref<2048xi8> to memref<?x4xf32>
+  %r2 = load %3[%c0, %c0] : memref<?x4xf32>

   // Test: folding static alloc and memref_cast into a view.
-  // CHECK: std.view %[[ALLOC_MEM]][][] : memref<2048xi8> to memref<15x7xf32, #[[VIEW_MAP5]]>
-  %6 = memref_cast %0 : memref<2048xi8> to memref<?xi8>
-  %7 = view %6[%c15][%c7] : memref<?xi8> to memref
-  %r5 = load %7[%c0, %c0] : memref
-  return %r0, %r1, %r2, %r3, %r4, %r5 : f32, f32, f32, f32, f32, f32
+  // CHECK: std.view %[[ALLOC_MEM]][%[[C15]]][] : memref<2048xi8> to memref<15x7xf32>
+  %4 = memref_cast %0 : memref<2048xi8> to memref<?xi8>
+  %5 = view %4[%c15][%c15, %c7] : memref<?xi8> to memref<?x?xf32>
+  %r3 = load %5[%c0, %c0] : memref<?x?xf32>
+  return %r0, %r1, %r2, %r3 : f32, f32, f32, f32
 }

 // -----

diff --git a/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir b/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir
--- a/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir
+++ b/mlir/test/mlir-cpu-runner/linalg_integration_test.mlir
@@ -22,9 +22,6 @@
 // RUN: | mlir-cpu-runner -e matmul -entry-point-result=f32 -shared-libs=%linalg_test_lib_dir/libmlir_test_cblas%shlibext,%linalg_test_lib_dir/libmlir_test_cblas_interface%shlibext \
 // RUN: | FileCheck %s

-#strided1D = affine_map<(d0) -> (d0)>
-#strided2D = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>
-
 // Creates and returns a 1-D buffer of size %s filled with the value %f
 func @alloc_filled_f32(%s : index, %f : f32) -> memref<?xi8> {
   %c0 = constant 0 : index
@@ -32,8 +29,8 @@
   %c4 = constant 4 : index
   %s4 = muli %s, %c4: index
   %buf = alloc(%s4) {alignment = 256} : memref<?xi8>
-  %V = view %buf[%s][] : memref<?xi8> to memref
-  linalg.fill(%V, %f) : memref, f32
+  %V = view %buf[%c0][%s] : memref<?xi8> to memref<?xf32>
+  linalg.fill(%V, %f) : memref<?xf32>, f32
   return %buf : memref<?xi8>
 }

@@ -50,11 +47,11 @@
   %bB = call @alloc_filled_f32(%c16, %f1) : (index, f32) -> (memref<?xi8>)
   %bC = call @alloc_filled_f32(%c1, %f10) : (index, f32) -> (memref<?xi8>)

-  %A = view %bA[%c16][] : memref<?xi8> to memref
-  %B = view %bB[%c16][] : memref<?xi8> to memref
-  %C = view %bC[][] : memref<?xi8> to memref<f32>
+  %A = view %bA[%c0][%c16] : memref<?xi8> to memref<?xf32>
+  %B = view %bB[%c0][%c16] : memref<?xi8> to memref<?xf32>
+  %C = view %bC[%c0][] : memref<?xi8> to memref<f32>

-  linalg.dot(%A, %B, %C) : memref, memref, memref
+  linalg.dot(%A, %B, %C) : memref<?xf32>, memref<?xf32>, memref<f32>

   %res = load %C[] : memref<f32>
   dealloc %bC : memref<?xi8>
@@ -82,12 +79,12 @@
   %bB = call @alloc_filled_f32(%c160, %f1) : (index, f32) -> (memref<?xi8>)
   %bC = call @alloc_filled_f32(%c100, %f10) : (index, f32) -> (memref<?xi8>)

-  %A = view %bA[][%c10, %c16] : memref<?xi8> to memref
-  %B = view %bB[][%c16, %c10] : memref<?xi8> to memref
-  %C = view %bC[][%c10, %c10] : memref<?xi8> to memref
+  %A = view %bA[%c0][%c10, %c16] : memref<?xi8> to memref<?x?xf32>
+  %B = view %bB[%c0][%c16, %c10] : memref<?xi8> to memref<?x?xf32>
+  %C = view %bC[%c0][%c10, %c10] : memref<?xi8> to memref<?x?xf32>

-  linalg.matmul(%A, %B, %C) : memref, memref, memref
-  %res = load %C[%c6, %c7] : memref
+  linalg.matmul(%A, %B, %C) : memref<?x?xf32>, memref<?x?xf32>, memref<?x?xf32>
+  %res = load %C[%c6, %c7] : memref<?x?xf32>

   dealloc %bC : memref<?xi8>
   dealloc %bB : memref
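As a reader aid (not part of the patch), the sketch below is a minimal, self-contained use of the op form these tests exercise: a mandatory byte-shift operand, one size operand per dynamic dimension, and a contiguous result type with no layout map. The function name is hypothetical, and the fold noted in the trailing comment is the canonicalization behavior checked in the canonicalize.mlir hunk above.

```mlir
// Hypothetical example, not taken from the patch.
func @view_usage_sketch() -> f32 {
  %c0 = constant 0 : index
  %c7 = constant 7 : index
  %c11 = constant 11 : index
  // Flat 1-D i8 backing buffer.
  %buf = alloc() : memref<2048xi8>
  // Byte shift of 0, two size operands for the two dynamic dimensions.
  %v = view %buf[%c0][%c7, %c11] : memref<2048xi8> to memref<?x?xf32>
  // After canonicalization the constant sizes fold into the result type,
  // i.e. the op becomes: view %buf[%c0][] : memref<2048xi8> to memref<7x11xf32>.
  %r = load %v[%c0, %c0] : memref<?x?xf32>
  dealloc %buf : memref<2048xi8>
  return %r : f32
}
```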