diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -425,6 +425,12 @@
   let summary = "Convert Standard dialect to SPIR-V dialect";
   let constructor = "mlir::createConvertStandardToSPIRVPass()";
   let dependentDialects = ["spirv::SPIRVDialect"];
+  let options = [
+    Option<"emulateNon32BitScalarTypes", "emulate-non-32-bit-scalar-types",
+           "bool", /*default=*/"true",
+           "Emulate non-32-bit scalar types with 32-bit ones if "
+           "missing native support">
+  ];
 }
 
 //===----------------------------------------------------------------------===//
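The tablegen Option above is surfaced as a command-line flag on the generated
pass. A usage sketch (the input file name is a placeholder), matching the RUN
line added to the tests at the end of this patch:

    mlir-opt -split-input-file \
        -convert-std-to-spirv="emulate-non-32-bit-scalar-types=false" input.mlir

With the flag left at its default (true), the pass keeps the previous behavior
of widening unsupported scalar types to 32 bits.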
diff --git a/mlir/include/mlir/Dialect/SPIRV/Transforms/SPIRVConversion.h b/mlir/include/mlir/Dialect/SPIRV/Transforms/SPIRVConversion.h
--- a/mlir/include/mlir/Dialect/SPIRV/Transforms/SPIRVConversion.h
+++ b/mlir/include/mlir/Dialect/SPIRV/Transforms/SPIRVConversion.h
@@ -27,29 +27,35 @@
 
 /// Type conversion from builtin types to SPIR-V types for shader interface.
 ///
-/// Non-32-bit scalar types require special hardware support that may not exist
-/// on all GPUs. This is reflected in SPIR-V as that non-32-bit scalar types
-/// require special capabilities or extensions. Right now if a scalar type of a
-/// certain bitwidth is not supported in the target environment, we use 32-bit
-/// ones unconditionally. This requires the runtime to also feed in data with
-/// a matched bitwidth and layout for interface types. The runtime can do that
-/// by inspecting the SPIR-V module.
-///
 /// For memref types, this converter additionally performs type wrapping to
 /// satisfy shader interface requirements: shader interface types must be
 /// pointers to structs.
-///
-/// TODO: We might want to introduce a way to control how unsupported bitwidth
-/// are handled and explicitly fail if wanted.
 class SPIRVTypeConverter : public TypeConverter {
 public:
-  explicit SPIRVTypeConverter(spirv::TargetEnvAttr targetAttr);
-
-  /// Gets the number of bytes used for a type when converted to SPIR-V
-  /// type. Note that it doesnt account for whether the type is legal for a
-  /// SPIR-V target (described by spirv::TargetEnvAttr). Returns None on
-  /// failure.
-  static Optional<int64_t> getConvertedTypeNumBytes(Type);
+  struct Options {
+    /// Whether to emulate non-32-bit scalar types with 32-bit scalar types if
+    /// no native support.
+    ///
+    /// Non-32-bit scalar types require special hardware support that may not
+    /// exist on all GPUs. This is reflected in SPIR-V in that non-32-bit
+    /// scalar types require special capabilities or extensions. This option
+    /// controls whether to use 32-bit types for emulation when a scalar type
+    /// of a certain bitwidth is not supported in the target environment. This
+    /// requires the runtime to also feed in data with a matched bitwidth and
+    /// layout for interface types; the runtime can do that by inspecting the
+    /// SPIR-V module.
+    ///
+    /// If the original scalar type has fewer than 32 bits, multiple of its
+    /// values will be packed into one 32-bit value to be memory efficient.
+    bool emulateNon32BitScalarTypes;
+
+    // Note: we need this instead of inline initializers because of
+    // https://bugs.llvm.org/show_bug.cgi?id=36684
+    Options() : emulateNon32BitScalarTypes(true) {}
+  };
+
+  explicit SPIRVTypeConverter(spirv::TargetEnvAttr targetAttr,
+                              Options options = {});
 
   /// Gets the SPIR-V correspondence for the standard index type.
   static Type getIndexType(MLIRContext *context);
@@ -63,8 +69,12 @@
 
   static Optional<spirv::StorageClass>
   getStorageClassForMemorySpace(unsigned space);
 
+  /// Returns the options controlling the SPIR-V type converter.
+  const Options &getOptions() const;
+
 private:
   spirv::TargetEnv targetEnv;
+  Options options;
 };
 
 //===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Conversion/StandardToSPIRV/StandardToSPIRVPass.cpp b/mlir/lib/Conversion/StandardToSPIRV/StandardToSPIRVPass.cpp
--- a/mlir/lib/Conversion/StandardToSPIRV/StandardToSPIRVPass.cpp
+++ b/mlir/lib/Conversion/StandardToSPIRV/StandardToSPIRVPass.cpp
@@ -34,7 +34,10 @@
   std::unique_ptr<ConversionTarget> target =
       SPIRVConversionTarget::get(targetAttr);
 
-  SPIRVTypeConverter typeConverter(targetAttr);
+  SPIRVTypeConverter::Options options;
+  options.emulateNon32BitScalarTypes = this->emulateNon32BitScalarTypes;
+  SPIRVTypeConverter typeConverter(targetAttr, options);
+
   RewritePatternSet patterns(context);
   populateStandardToSPIRVPatterns(typeConverter, patterns);
   populateTensorToSPIRVPatterns(typeConverter,
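For reference, constructing the converter with non-default options follows the
same pattern the pass uses above. A minimal sketch, assuming `targetAttr` is
the `spirv::TargetEnvAttr` looked up from the enclosing module:

    SPIRVTypeConverter::Options options;
    options.emulateNon32BitScalarTypes = false; // Fail instead of widening.
    SPIRVTypeConverter typeConverter(targetAttr, options);
    // typeConverter.convertType(...) now returns a null Type for, e.g., i64
    // when the target environment lacks the Int64 capability.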
diff --git a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
--- a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
@@ -155,74 +155,74 @@
 
 #undef STORAGE_SPACE_MAP_LIST
 
-// TODO: This is a utility function that should probably be
-// exposed by the SPIR-V dialect. Keeping it local till the use case arises.
-static Optional<int64_t> getTypeNumBytes(Type t) {
-  if (t.isa<spirv::ScalarType>()) {
-    auto bitWidth = t.getIntOrFloatBitWidth();
+// TODO: This is a utility function that should probably be exposed by the
+// SPIR-V dialect. Keeping it local till the use case arises.
+static Optional<int64_t>
+getTypeNumBytes(const SPIRVTypeConverter::Options &options, Type type) {
+  if (type.isa<spirv::ScalarType>()) {
+    auto bitWidth = type.getIntOrFloatBitWidth();
     // According to the SPIR-V spec:
     // "There is no physical size or bit pattern defined for values with boolean
     // type. If they are stored (in conjunction with OpVariable), they can only
     // be used with logical addressing operations, not physical, and only with
     // non-externally visible shader Storage Classes: Workgroup, CrossWorkgroup,
    // Private, Function, Input, and Output."
-    if (bitWidth == 1) {
+    if (bitWidth == 1)
       return llvm::None;
-    }
     return bitWidth / 8;
   }
 
-  if (auto vecType = t.dyn_cast<VectorType>()) {
-    auto elementSize = getTypeNumBytes(vecType.getElementType());
+  if (auto vecType = type.dyn_cast<VectorType>()) {
+    auto elementSize = getTypeNumBytes(options, vecType.getElementType());
     if (!elementSize)
       return llvm::None;
-    return vecType.getNumElements() * *elementSize;
+    return vecType.getNumElements() * elementSize.getValue();
   }
 
-  if (auto memRefType = t.dyn_cast<MemRefType>()) {
+  if (auto memRefType = type.dyn_cast<MemRefType>()) {
     // TODO: Layout should also be controlled by the ABI attributes. For now
     // using the layout from MemRef.
     int64_t offset;
     SmallVector<int64_t, 4> strides;
     if (!memRefType.hasStaticShape() ||
-        failed(getStridesAndOffset(memRefType, strides, offset))) {
+        failed(getStridesAndOffset(memRefType, strides, offset)))
       return llvm::None;
-    }
+
     // To get the size of the memref object in memory, the total size is the
     // max(stride * dimension-size) computed for all dimensions times the size
     // of the element.
-    auto elementSize = getTypeNumBytes(memRefType.getElementType());
-    if (!elementSize) {
+    auto elementSize = getTypeNumBytes(options, memRefType.getElementType());
+    if (!elementSize)
       return llvm::None;
-    }
-    if (memRefType.getRank() == 0) {
+
+    if (memRefType.getRank() == 0)
       return elementSize;
-    }
+
     auto dims = memRefType.getShape();
     if (llvm::is_contained(dims, ShapedType::kDynamicSize) ||
         offset == MemRefType::getDynamicStrideOrOffset() ||
-        llvm::is_contained(strides, MemRefType::getDynamicStrideOrOffset())) {
+        llvm::is_contained(strides, MemRefType::getDynamicStrideOrOffset()))
       return llvm::None;
-    }
+
     int64_t memrefSize = -1;
-    for (auto shape : enumerate(dims)) {
+    for (auto shape : enumerate(dims))
       memrefSize = std::max(memrefSize, shape.value() * strides[shape.index()]);
-    }
+
     return (offset + memrefSize) * elementSize.getValue();
   }
 
-  if (auto tensorType = t.dyn_cast<TensorType>()) {
-    if (!tensorType.hasStaticShape()) {
+  if (auto tensorType = type.dyn_cast<TensorType>()) {
+    if (!tensorType.hasStaticShape())
       return llvm::None;
-    }
-    auto elementSize = getTypeNumBytes(tensorType.getElementType());
-    if (!elementSize) {
+
+    auto elementSize = getTypeNumBytes(options, tensorType.getElementType());
+    if (!elementSize)
       return llvm::None;
-    }
+
     int64_t size = elementSize.getValue();
-    for (auto shape : tensorType.getShape()) {
+    for (auto shape : tensorType.getShape())
       size *= shape;
-    }
+
     return size;
   }
 
@@ -230,12 +230,9 @@
   return llvm::None;
 }
 
-Optional<int64_t> SPIRVTypeConverter::getConvertedTypeNumBytes(Type t) {
-  return getTypeNumBytes(t);
-}
-
 /// Converts a scalar `type` to a suitable type under the given `targetEnv`.
 static Type convertScalarType(const spirv::TargetEnv &targetEnv,
+                              const SPIRVTypeConverter::Options options,
                               spirv::ScalarType type,
                               Optional<spirv::StorageClass> storageClass = {}) {
   // Get extension and capability requirements for the given type.
@@ -251,13 +248,9 @@
   // Otherwise we need to adjust the type, which really means adjusting the
   // bitwidth given this is a scalar type.
-  // TODO: We are unconditionally converting the bitwidth here,
-  // this might be okay for non-interface types (i.e., types used in
-  // Private/Function storage classes), but not for interface types (i.e.,
-  // types used in StorageBuffer/Uniform/PushConstant/etc. storage classes).
-  // This is because the later actually affects the ABI contract with the
-  // runtime. So we may want to expose a control on SPIRVTypeConverter to fail
-  // conversion if we cannot change there.
+
+  if (!options.emulateNon32BitScalarTypes)
+    return nullptr;
 
   if (auto floatType = type.dyn_cast<FloatType>()) {
     LLVM_DEBUG(llvm::dbgs() << type << " converted to 32-bit for SPIR-V\n");
@@ -272,6 +265,7 @@
 
 /// Converts a vector `type` to a suitable type under the given `targetEnv`.
 static Type convertVectorType(const spirv::TargetEnv &targetEnv,
+                              const SPIRVTypeConverter::Options &options,
                               VectorType type,
                               Optional<spirv::StorageClass> storageClass = {}) {
   if (type.getRank() == 1 && type.getNumElements() == 1)
@@ -296,7 +290,8 @@
     return type;
 
   auto elementType = convertScalarType(
-      targetEnv, type.getElementType().cast<spirv::ScalarType>(), storageClass);
+      targetEnv, options, type.getElementType().cast<spirv::ScalarType>(),
+      storageClass);
   if (elementType)
     return VectorType::get(type.getShape(), elementType);
   return nullptr;
 }
@@ -304,11 +299,12 @@
 
 /// Converts a tensor `type` to a suitable type under the given `targetEnv`.
 ///
-/// Note that this is mainly for lowering constant tensors.In SPIR-V one can
+/// Note that this is mainly for lowering constant tensors. In SPIR-V one can
 /// create composite constants with OpConstantComposite to embed relative large
 /// constant values and use OpCompositeExtract and OpCompositeInsert to
 /// manipulate, like what we do for vectors.
 static Type convertTensorType(const spirv::TargetEnv &targetEnv,
+                              const SPIRVTypeConverter::Options &options,
                               TensorType type) {
   // TODO: Handle dynamic shapes.
   if (!type.hasStaticShape()) {
@@ -324,8 +320,8 @@
     return nullptr;
   }
 
-  Optional<int64_t> scalarSize = getTypeNumBytes(scalarType);
-  Optional<int64_t> tensorSize = getTypeNumBytes(type);
+  Optional<int64_t> scalarSize = getTypeNumBytes(options, scalarType);
+  Optional<int64_t> tensorSize = getTypeNumBytes(options, type);
   if (!scalarSize || !tensorSize) {
     LLVM_DEBUG(llvm::dbgs()
                << type << " illegal: cannot deduce element count\n");
     return nullptr;
   }
@@ -333,10 +329,10 @@
   auto arrayElemCount = *tensorSize / *scalarSize;
-  auto arrayElemType = convertScalarType(targetEnv, scalarType);
+  auto arrayElemType = convertScalarType(targetEnv, options, scalarType);
   if (!arrayElemType)
     return nullptr;
-  Optional<int64_t> arrayElemSize = getTypeNumBytes(arrayElemType);
+  Optional<int64_t> arrayElemSize = getTypeNumBytes(options, arrayElemType);
   if (!arrayElemSize) {
     LLVM_DEBUG(llvm::dbgs()
                << type << " illegal: cannot deduce converted element size\n");
     return nullptr;
   }
@@ -347,6 +343,7 @@
 }
 
 static Type convertMemrefType(const spirv::TargetEnv &targetEnv,
+                              const SPIRVTypeConverter::Options &options,
                               MemRefType type) {
   Optional<spirv::StorageClass> storageClass =
       SPIRVTypeConverter::getStorageClassForMemorySpace(
@@ -360,9 +357,11 @@
   Type arrayElemType;
   Type elementType = type.getElementType();
   if (auto vecType = elementType.dyn_cast<VectorType>()) {
-    arrayElemType = convertVectorType(targetEnv, vecType, storageClass);
+    arrayElemType =
+        convertVectorType(targetEnv, options, vecType, storageClass);
   } else if (auto scalarType = elementType.dyn_cast<spirv::ScalarType>()) {
-    arrayElemType = convertScalarType(targetEnv, scalarType, storageClass);
+    arrayElemType =
+        convertScalarType(targetEnv, options, scalarType, storageClass);
   } else {
     LLVM_DEBUG(
         llvm::dbgs()
@@ -373,7 +372,7 @@
   if (!arrayElemType)
     return nullptr;
 
-  Optional<int64_t> elementSize = getTypeNumBytes(elementType);
+  Optional<int64_t> elementSize = getTypeNumBytes(options, elementType);
   if (!elementSize) {
     LLVM_DEBUG(llvm::dbgs()
                << type << " illegal: cannot deduce element size\n");
@@ -387,7 +386,7 @@
     return spirv::PointerType::get(structType, *storageClass);
   }
 
-  Optional<int64_t> memrefSize = getTypeNumBytes(type);
+  Optional<int64_t> memrefSize = getTypeNumBytes(options, type);
   if (!memrefSize) {
     LLVM_DEBUG(llvm::dbgs()
                << type << " illegal: cannot deduce element count\n");
@@ -396,7 +395,7 @@
 
   auto arrayElemCount = *memrefSize / *elementSize;
 
-  Optional<int64_t> arrayElemSize = getTypeNumBytes(arrayElemType);
+  Optional<int64_t> arrayElemSize = getTypeNumBytes(options, arrayElemType);
   if (!arrayElemSize) {
     LLVM_DEBUG(llvm::dbgs()
                << type << " illegal: cannot deduce converted element size\n");
@@ -414,8 +413,9 @@
   return spirv::PointerType::get(structType, *storageClass);
 }
 
-SPIRVTypeConverter::SPIRVTypeConverter(spirv::TargetEnvAttr targetAttr)
-    : targetEnv(targetAttr) {
+SPIRVTypeConverter::SPIRVTypeConverter(spirv::TargetEnvAttr targetAttr,
+                                       Options options)
+    : targetEnv(targetAttr), options(options) {
   // Add conversions. The order matters here: later ones will be tried earlier.
 
   // Allow all SPIR-V dialect specific types. This assumes all builtin types
@@ -434,26 +434,26 @@
   addConversion([this](IntegerType intType) -> Optional<Type> {
     if (auto scalarType = intType.dyn_cast<spirv::ScalarType>())
-      return convertScalarType(targetEnv, scalarType);
+      return convertScalarType(this->targetEnv, this->options, scalarType);
     return Type();
   });
 
   addConversion([this](FloatType floatType) -> Optional<Type> {
     if (auto scalarType = floatType.dyn_cast<spirv::ScalarType>())
-      return convertScalarType(targetEnv, scalarType);
+      return convertScalarType(this->targetEnv, this->options, scalarType);
     return Type();
   });
 
   addConversion([this](VectorType vectorType) {
-    return convertVectorType(targetEnv, vectorType);
+    return convertVectorType(this->targetEnv, this->options, vectorType);
   });
 
   addConversion([this](TensorType tensorType) {
-    return convertTensorType(targetEnv, tensorType);
+    return convertTensorType(this->targetEnv, this->options, tensorType);
   });
 
   addConversion([this](MemRefType memRefType) {
-    return convertMemrefType(targetEnv, memRefType);
+    return convertMemrefType(this->targetEnv, this->options, memRefType);
   });
 }
@@ -490,8 +490,11 @@
   }
 
   Type resultType;
-  if (fnType.getNumResults() == 1)
+  if (fnType.getNumResults() == 1) {
     resultType = getTypeConverter()->convertType(fnType.getResult(0));
+    if (!resultType)
+      return failure();
+  }
 
   // Create the converted spv.func op.
   auto newFuncOp = rewriter.create<spirv::FuncOp>(
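To make the new behavior concrete, this is what the two RUN configurations in
the updated test below verify for a target without the Float16 capability
(illustrative, mirroring the test expectations):

    func @float16(%arg0: f16) { return }
    // Default (emulation on): converted to a spv.func taking f32.
    // emulate-non-32-bit-scalar-types=false: conversion fails and the func op
    // is left untouched with its original f16 argument.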
diff --git a/mlir/test/Conversion/StandardToSPIRV/std-types-to-spirv.mlir b/mlir/test/Conversion/StandardToSPIRV/std-types-to-spirv.mlir
--- a/mlir/test/Conversion/StandardToSPIRV/std-types-to-spirv.mlir
+++ b/mlir/test/Conversion/StandardToSPIRV/std-types-to-spirv.mlir
@@ -1,4 +1,5 @@
 // RUN: mlir-opt -split-input-file -convert-std-to-spirv %s -o - | FileCheck %s
+// RUN: mlir-opt -split-input-file -convert-std-to-spirv="emulate-non-32-bit-scalar-types=false" %s -o - | FileCheck %s --check-prefix=NOEMU
 
 //===----------------------------------------------------------------------===//
 // Integer types
@@ -14,18 +15,30 @@
 // CHECK-SAME: i32
 // CHECK-SAME: si32
 // CHECK-SAME: ui32
+// NOEMU-LABEL: func @integer8
+// NOEMU-SAME: i8
+// NOEMU-SAME: si8
+// NOEMU-SAME: ui8
 func @integer8(%arg0: i8, %arg1: si8, %arg2: ui8) { return }
 
 // CHECK-LABEL: spv.func @integer16
 // CHECK-SAME: i32
 // CHECK-SAME: si32
 // CHECK-SAME: ui32
+// NOEMU-LABEL: func @integer16
+// NOEMU-SAME: i16
+// NOEMU-SAME: si16
+// NOEMU-SAME: ui16
 func @integer16(%arg0: i16, %arg1: si16, %arg2: ui16) { return }
 
 // CHECK-LABEL: spv.func @integer64
 // CHECK-SAME: i32
 // CHECK-SAME: si32
 // CHECK-SAME: ui32
+// NOEMU-LABEL: func @integer64
+// NOEMU-SAME: i64
+// NOEMU-SAME: si64
+// NOEMU-SAME: ui64
 func @integer64(%arg0: i64, %arg1: si64, %arg2: ui64) { return }
 
 } // end module
@@ -42,18 +55,30 @@
 // CHECK-SAME: i8
 // CHECK-SAME: si8
 // CHECK-SAME: ui8
+// NOEMU-LABEL: spv.func @integer8
+// NOEMU-SAME: i8
+// NOEMU-SAME: si8
+// NOEMU-SAME: ui8
 func @integer8(%arg0: i8, %arg1: si8, %arg2: ui8) { return }
 
 // CHECK-LABEL: spv.func @integer16
 // CHECK-SAME: i16
 // CHECK-SAME: si16
 // CHECK-SAME: ui16
+// NOEMU-LABEL: spv.func @integer16
+// NOEMU-SAME: i16
+// NOEMU-SAME: si16
+// NOEMU-SAME: ui16
 func @integer16(%arg0: i16, %arg1: si16, %arg2: ui16) { return }
 
 // CHECK-LABEL: spv.func @integer64
 // CHECK-SAME: i64
 // CHECK-SAME: si64
 // CHECK-SAME: ui64
+// NOEMU-LABEL: spv.func @integer64
+// NOEMU-SAME: i64
+// NOEMU-SAME: si64
+// NOEMU-SAME: ui64
 func @integer64(%arg0: i64, %arg1: si64, %arg2: ui64) { return }
 
 } // end module
@@ -106,10 +131,14 @@
 
 // CHECK-LABEL: spv.func @float16
 // CHECK-SAME: f32
+// NOEMU-LABEL: func @float16
+// NOEMU-SAME: f16
 func @float16(%arg0: f16) { return }
 
 // CHECK-LABEL: spv.func @float64
 // CHECK-SAME: f32
+// NOEMU-LABEL: func @float64
+// NOEMU-SAME: f64
 func @float64(%arg0: f64) { return }
 
 } // end module
@@ -124,10 +153,14 @@
 
 // CHECK-LABEL: spv.func @float16
 // CHECK-SAME: f16
+// NOEMU-LABEL: spv.func @float16
+// NOEMU-SAME: f16
 func @float16(%arg0: f16) { return }
 
 // CHECK-LABEL: spv.func @float64
 // CHECK-SAME: f64
+// NOEMU-LABEL: spv.func @float64
+// NOEMU-SAME: f64
 func @float64(%arg0: f64) { return }
 
 } // end module
@@ -276,34 +309,50 @@
 // CHECK-LABEL: spv.func @memref_8bit_StorageBuffer
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i32, stride=4> [0])>, StorageBuffer>
+// NOEMU-LABEL: func @memref_8bit_StorageBuffer
+// NOEMU-SAME: memref<16xi8>
 func @memref_8bit_StorageBuffer(%arg0: memref<16xi8, 0>) { return }
 
 // CHECK-LABEL: spv.func @memref_8bit_Uniform
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x si32, stride=4> [0])>, Uniform>
+// NOEMU-LABEL: func @memref_8bit_Uniform
+// NOEMU-SAME: memref<16xsi8, 4>
 func @memref_8bit_Uniform(%arg0: memref<16xsi8, 4>) { return }
 
 // CHECK-LABEL: spv.func @memref_8bit_PushConstant
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x ui32, stride=4> [0])>, PushConstant>
+// NOEMU-LABEL: func @memref_8bit_PushConstant
+// NOEMU-SAME: memref<16xui8, 7>
 func @memref_8bit_PushConstant(%arg0: memref<16xui8, 7>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_StorageBuffer
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i32, stride=4> [0])>, StorageBuffer>
+// NOEMU-LABEL: func @memref_16bit_StorageBuffer
+// NOEMU-SAME: memref<16xi16>
 func @memref_16bit_StorageBuffer(%arg0: memref<16xi16, 0>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_Uniform
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x si32, stride=4> [0])>, Uniform>
+// NOEMU-LABEL: func @memref_16bit_Uniform
+// NOEMU-SAME: memref<16xsi16, 4>
 func @memref_16bit_Uniform(%arg0: memref<16xsi16, 4>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_PushConstant
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x ui32, stride=4> [0])>, PushConstant>
+// NOEMU-LABEL: func @memref_16bit_PushConstant
+// NOEMU-SAME: memref<16xui16, 7>
 func @memref_16bit_PushConstant(%arg0: memref<16xui16, 7>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_Input
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f32, stride=4> [0])>, Input>
+// NOEMU-LABEL: func @memref_16bit_Input
+// NOEMU-SAME: memref<16xf16, 9>
 func @memref_16bit_Input(%arg3: memref<16xf16, 9>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_Output
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f32, stride=4> [0])>, Output>
+// NOEMU-LABEL: func @memref_16bit_Output
+// NOEMU-SAME: memref<16xf16, 10>
 func @memref_16bit_Output(%arg4: memref<16xf16, 10>) { return }
 
 } // end module
@@ -321,11 +370,16 @@
 
 // CHECK-LABEL: spv.func @memref_8bit_PushConstant
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i8, stride=1> [0])>, PushConstant>
+// NOEMU-LABEL: spv.func @memref_8bit_PushConstant
+// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i8, stride=1> [0])>, PushConstant>
 func @memref_8bit_PushConstant(%arg0: memref<16xi8, 7>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_PushConstant
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, PushConstant>
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16, stride=2> [0])>, PushConstant>
+// NOEMU-LABEL: spv.func @memref_16bit_PushConstant
+// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, PushConstant>
+// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16, stride=2> [0])>, PushConstant>
 func @memref_16bit_PushConstant(
   %arg0: memref<16xi16, 7>,
   %arg1: memref<16xf16, 7>
@@ -346,11 +400,16 @@
 
 // CHECK-LABEL: spv.func @memref_8bit_StorageBuffer
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i8, stride=1> [0])>, StorageBuffer>
+// NOEMU-LABEL: spv.func @memref_8bit_StorageBuffer
+// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i8, stride=1> [0])>, StorageBuffer>
 func @memref_8bit_StorageBuffer(%arg0: memref<16xi8, 0>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_StorageBuffer
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, StorageBuffer>
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16, stride=2> [0])>, StorageBuffer>
+// NOEMU-LABEL: spv.func @memref_16bit_StorageBuffer
+// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, StorageBuffer>
+// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16, stride=2> [0])>, StorageBuffer>
 func @memref_16bit_StorageBuffer(
   %arg0: memref<16xi16, 0>,
   %arg1: memref<16xf16, 0>
@@ -371,11 +430,16 @@
 
 // CHECK-LABEL: spv.func @memref_8bit_Uniform
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i8, stride=1> [0])>, Uniform>
+// NOEMU-LABEL: spv.func @memref_8bit_Uniform
+// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i8, stride=1> [0])>, Uniform>
 func @memref_8bit_Uniform(%arg0: memref<16xi8, 4>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_Uniform
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, Uniform>
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16, stride=2> [0])>, Uniform>
+// NOEMU-LABEL: spv.func @memref_16bit_Uniform
+// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, Uniform>
+// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16, stride=2> [0])>, Uniform>
 func @memref_16bit_Uniform(
   %arg0: memref<16xi16, 4>,
   %arg1: memref<16xf16, 4>
@@ -395,10 +459,14 @@
 
 // CHECK-LABEL: spv.func @memref_16bit_Input
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16, stride=2> [0])>, Input>
+// NOEMU-LABEL: spv.func @memref_16bit_Input
+// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16, stride=2> [0])>, Input>
 func @memref_16bit_Input(%arg3: memref<16xf16, 9>) { return }
 
 // CHECK-LABEL: spv.func @memref_16bit_Output
 // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, Output>
+// NOEMU-LABEL: spv.func @memref_16bit_Output
+// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, Output>
 func @memref_16bit_Output(%arg4: memref<16xi16, 10>) { return }
 
 } // end module
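Downstream conversion patterns can query how the converter was configured
through the new getOptions() accessor. A hypothetical sketch (only the
accessor comes from this patch; the surrounding pattern code is illustrative):

    const SPIRVTypeConverter::Options &opts = typeConverter.getOptions();
    if (!opts.emulateNon32BitScalarTypes) {
      // Non-32-bit scalar conversions are expected to fail unless the target
      // environment natively supports the bitwidth.
    }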