diff --git a/mlir/include/mlir/Dialect/SPIRV/TargetAndABI.td b/mlir/include/mlir/Dialect/SPIRV/TargetAndABI.td --- a/mlir/include/mlir/Dialect/SPIRV/TargetAndABI.td +++ b/mlir/include/mlir/Dialect/SPIRV/TargetAndABI.td @@ -39,12 +39,16 @@ // This attribute specifies the limits for various resources on the target // architecture. // -// See https://renderdoc.org/vkspec_chunked/chap36.html#limits for the complete -// list of limits and their explanation for the Vulkan API. The following ones -// are those affecting SPIR-V CodeGen. +// See https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#limits +// for the complete list of limits and their explanation for the Vulkan API. +// The following ones are those affecting SPIR-V CodeGen. Their default values +// are from the Vulkan limit requirements: +// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/vkspec.html#limits-minmax def SPV_ResourceLimitsAttr : StructAttr<"ResourceLimitsAttr", SPIRV_Dialect, [ - StructFieldAttr<"max_compute_workgroup_invocations", I32Attr>, - StructFieldAttr<"max_compute_workgroup_size", I32ElementsAttr> + StructFieldAttr<"max_compute_workgroup_invocations", + DefaultValuedAttr<I32Attr, "128">>, + StructFieldAttr<"max_compute_workgroup_size", + DefaultValuedAttr<I32ElementsAttr, "$_builder.getI32VectorAttr({128, 128, 64})">> ]>; #endif // SPIRV_TARGET_AND_ABI diff --git a/mlir/test/Conversion/GPUToSPIRV/if.mlir b/mlir/test/Conversion/GPUToSPIRV/if.mlir --- a/mlir/test/Conversion/GPUToSPIRV/if.mlir +++ b/mlir/test/Conversion/GPUToSPIRV/if.mlir @@ -3,9 +3,7 @@ module attributes { gpu.container_module, spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { func @main(%arg0 : memref<10xf32>, %arg1 : i1) { %c0 = constant 1 : index diff --git a/mlir/test/Conversion/GPUToSPIRV/load-store.mlir b/mlir/test/Conversion/GPUToSPIRV/load-store.mlir --- a/mlir/test/Conversion/GPUToSPIRV/load-store.mlir +++ 
b/mlir/test/Conversion/GPUToSPIRV/load-store.mlir @@ -3,9 +3,7 @@ module attributes { gpu.container_module, spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { func @load_store(%arg0: memref<12x4xf32>, %arg1: memref<12x4xf32>, %arg2: memref<12x4xf32>) { %c0 = constant 0 : index diff --git a/mlir/test/Conversion/GPUToSPIRV/loop.mlir b/mlir/test/Conversion/GPUToSPIRV/loop.mlir --- a/mlir/test/Conversion/GPUToSPIRV/loop.mlir +++ b/mlir/test/Conversion/GPUToSPIRV/loop.mlir @@ -3,9 +3,7 @@ module attributes { gpu.container_module, spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { func @loop(%arg0 : memref<10xf32>, %arg1 : memref<10xf32>) { %c0 = constant 1 : index diff --git a/mlir/test/Conversion/GPUToSPIRV/module-structure-opencl.mlir b/mlir/test/Conversion/GPUToSPIRV/module-structure-opencl.mlir --- a/mlir/test/Conversion/GPUToSPIRV/module-structure-opencl.mlir +++ b/mlir/test/Conversion/GPUToSPIRV/module-structure-opencl.mlir @@ -2,10 +2,7 @@ module attributes { gpu.container_module, - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { gpu.module @kernels { // CHECK-LABEL: spv.module @{{.*}} Physical64 OpenCL diff --git a/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir b/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir --- a/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir +++ b/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir @@ -16,11 +16,7 @@ module attributes { spv.target_env = #spv.target_env< - #spv.vce, - { - max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 
128, 64]> : vector<3xi32> - }> + #spv.vce, {}> } { // CHECK: spv.globalVariable @@ -78,11 +74,7 @@ module attributes { spv.target_env = #spv.target_env< - #spv.vce, - { - max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32> - }> + #spv.vce, {}> } { func @single_workgroup_reduction(%input: memref<16xi32>, %output: memref<1xi32>) { // expected-error @+1 {{failed to legalize operation 'linalg.generic'}} @@ -111,11 +103,7 @@ module attributes { spv.target_env = #spv.target_env< - #spv.vce, - { - max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32> - }> + #spv.vce, {}> } { func @single_workgroup_reduction(%input: memref<16xi32>, %output: memref<1xi32>) attributes { spv.entry_point_abi = {local_size = dense<[32, 1, 1]>: vector<3xi32>} @@ -146,11 +134,7 @@ module attributes { spv.target_env = #spv.target_env< - #spv.vce, - { - max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32> - }> + #spv.vce, {}> } { func @single_workgroup_reduction(%input: memref<16x8xi32>, %output: memref<16xi32>) attributes { spv.entry_point_abi = {local_size = dense<[16, 8, 1]>: vector<3xi32>} diff --git a/mlir/test/Conversion/StandardToSPIRV/alloc.mlir b/mlir/test/Conversion/StandardToSPIRV/alloc.mlir --- a/mlir/test/Conversion/StandardToSPIRV/alloc.mlir +++ b/mlir/test/Conversion/StandardToSPIRV/alloc.mlir @@ -6,9 +6,7 @@ module attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { func @alloc_dealloc_workgroup_mem(%arg0 : index, %arg1 : index) { @@ -34,9 +32,7 @@ module attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { 
func @alloc_dealloc_workgroup_mem(%arg0 : index, %arg1 : index) { @@ -65,9 +61,7 @@ module attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { func @two_allocs() { @@ -88,9 +82,7 @@ module attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { func @two_allocs_vector() { @@ -112,9 +104,7 @@ module attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { func @alloc_dealloc_dynamic_workgroup_mem(%arg0 : index) { @@ -129,9 +119,7 @@ module attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { func @alloc_dealloc_mem() { @@ -146,9 +134,7 @@ module attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { func @alloc_dealloc_dynamic_workgroup_mem(%arg0 : memref<4x?xf32, 3>) { @@ -163,9 +149,7 @@ module attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { func @alloc_dealloc_mem(%arg0 : memref<4x5xf32>) { diff --git a/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir b/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir --- a/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir +++ b/mlir/test/Conversion/StandardToSPIRV/std-ops-to-spirv.mlir @@ -6,9 +6,7 @@ module attributes { spv.target_env = 
#spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { // Check integer operation conversions. @@ -146,10 +144,7 @@ // Check that types are converted to 32-bit when no special capabilities. module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK-LABEL: @int_vector23 @@ -177,10 +172,7 @@ // Check that types are converted to 32-bit when no special capabilities that // are not supported. module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { func @int_vector4_invalid(%arg0: vector<4xi64>) { @@ -199,10 +191,7 @@ //===----------------------------------------------------------------------===// module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK-LABEL: @bitwise_scalar @@ -348,9 +337,7 @@ module attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { // CHECK-LABEL: @constant @@ -412,10 +399,7 @@ // Check that constants are converted to 32-bit when no special capability. 
module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK-LABEL: @constant_16bit @@ -498,9 +482,7 @@ module attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { // CHECK-LABEL: index_cast1 @@ -631,10 +613,7 @@ // Checks that cast types will be adjusted when no special capabilities for // non-32-bit scalar types. module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK-LABEL: @fpext1 @@ -682,9 +661,8 @@ module attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { //===----------------------------------------------------------------------===// @@ -750,9 +728,7 @@ // TODO: Test i1 and i64 types. 
module attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { // CHECK-LABEL: @load_i8 @@ -895,9 +871,7 @@ module attributes { spv.target_env = #spv.target_env< #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + [SPV_KHR_storage_buffer_storage_class, SPV_KHR_16bit_storage]>, {}> } { // CHECK-LABEL: @load_i8 diff --git a/mlir/test/Conversion/StandardToSPIRV/std-types-to-spirv.mlir b/mlir/test/Conversion/StandardToSPIRV/std-types-to-spirv.mlir --- a/mlir/test/Conversion/StandardToSPIRV/std-types-to-spirv.mlir +++ b/mlir/test/Conversion/StandardToSPIRV/std-types-to-spirv.mlir @@ -7,10 +7,7 @@ // Check that non-32-bit integer types are converted to 32-bit types if the // corresponding capabilities are not available. module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK-LABEL: spv.func @integer8 @@ -38,10 +35,7 @@ // Check that non-32-bit integer types are kept untouched if the corresponding // capabilities are available. module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK-LABEL: spv.func @integer8 @@ -68,10 +62,7 @@ // Check that weird bitwidths are not supported. 
module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK-NOT: spv.func @integer4 @@ -92,10 +83,7 @@ // The index type is always converted into i32. module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK-LABEL: spv.func @index_type @@ -113,10 +101,7 @@ // Check that non-32-bit float types are converted to 32-bit types if the // corresponding capabilities are not available. module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK-LABEL: spv.func @float16 @@ -134,10 +119,7 @@ // Check that non-32-bit float types are kept untouched if the corresponding // capabilities are available. module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK-LABEL: spv.func @float16 @@ -154,10 +136,7 @@ // Check that bf16 is not supported. module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK-NOT: spv.func @bf16_type @@ -174,10 +153,7 @@ // Check that capabilities for scalar types affects vector types too: no special // capabilities available means using turning element types to 32-bit. 
module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK-LABEL: spv.func @int_vector @@ -206,9 +182,7 @@ // special capabilities means keep vector types untouched. module attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { // CHECK-LABEL: spv.func @int_vector @@ -235,10 +209,7 @@ // Check that 1- or > 4-element vectors are not supported. module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK-NOT: spv.func @one_element_vector @@ -258,9 +229,7 @@ // Check memory spaces. module attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { // CHECK-LABEL: func @memref_mem_space @@ -285,10 +254,7 @@ // Check that boolean memref is not supported at the moment. module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK-LABEL: func @memref_type({{%.*}}: memref<3xi1>) @@ -304,10 +270,7 @@ // requires special capability and extension: convert them to 32-bit if not // satisfied. 
module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK-LABEL: spv.func @memref_8bit_StorageBuffer @@ -352,9 +315,7 @@ module attributes { spv.target_env = #spv.target_env< #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + [SPV_KHR_8bit_storage, SPV_KHR_16bit_storage]>, {}> } { // CHECK-LABEL: spv.func @memref_8bit_PushConstant @@ -379,9 +340,7 @@ module attributes { spv.target_env = #spv.target_env< #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + [SPV_KHR_8bit_storage, SPV_KHR_16bit_storage]>, {}> } { // CHECK-LABEL: spv.func @memref_8bit_StorageBuffer @@ -406,9 +365,7 @@ module attributes { spv.target_env = #spv.target_env< #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + [SPV_KHR_8bit_storage, SPV_KHR_16bit_storage]>, {}> } { // CHECK-LABEL: spv.func @memref_8bit_Uniform @@ -432,9 +389,7 @@ // and extension is available. module attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { // CHECK-LABEL: spv.func @memref_16bit_Input @@ -452,9 +407,7 @@ // Check that memref offset and strides affect the array size. 
module attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { // CHECK-LABEL: spv.func @memref_offset_strides @@ -488,10 +441,7 @@ // Dynamic shapes module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // Check that unranked shapes are not supported. @@ -512,10 +462,7 @@ // Vector types module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK-LABEL: func @memref_vector @@ -539,10 +486,7 @@ // Vector types, check that sizes not available in SPIR-V are not transformed. module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK-LABEL: func @memref_vector_wrong_size @@ -562,9 +506,7 @@ // Check that tensor element types are kept untouched with proper capabilities. module attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { // CHECK-LABEL: spv.func @int_tensor_types @@ -595,10 +537,7 @@ // Check that tensor element types are changed to 32-bit without capabilities. 
module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK-LABEL: spv.func @int_tensor_types @@ -629,10 +568,7 @@ // Check that dynamic shapes are not supported. module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK-LABEL: func @unranked_tensor diff --git a/mlir/test/Dialect/SPIRV/Transforms/abi-interface-opencl.mlir b/mlir/test/Dialect/SPIRV/Transforms/abi-interface-opencl.mlir --- a/mlir/test/Dialect/SPIRV/Transforms/abi-interface-opencl.mlir +++ b/mlir/test/Dialect/SPIRV/Transforms/abi-interface-opencl.mlir @@ -1,10 +1,7 @@ // RUN: mlir-opt -spirv-lower-abi-attrs -verify-diagnostics %s -o - | FileCheck %s module attributes { - spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { spv.module Physical64 OpenCL { // CHECK-LABEL: spv.module diff --git a/mlir/test/Dialect/SPIRV/Transforms/abi-interface.mlir b/mlir/test/Dialect/SPIRV/Transforms/abi-interface.mlir --- a/mlir/test/Dialect/SPIRV/Transforms/abi-interface.mlir +++ b/mlir/test/Dialect/SPIRV/Transforms/abi-interface.mlir @@ -2,9 +2,7 @@ module attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { // CHECK-LABEL: spv.module diff --git a/mlir/test/Dialect/SPIRV/Transforms/abi-load-store.mlir b/mlir/test/Dialect/SPIRV/Transforms/abi-load-store.mlir --- a/mlir/test/Dialect/SPIRV/Transforms/abi-load-store.mlir +++ 
b/mlir/test/Dialect/SPIRV/Transforms/abi-load-store.mlir @@ -2,9 +2,7 @@ module attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { // CHECK-LABEL: spv.module diff --git a/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir b/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir --- a/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir +++ b/mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir @@ -10,9 +10,7 @@ // CHECK: requires #spv.vce spv.module Logical GLSL450 attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { spv.func @iadd(%val : i32) -> i32 "None" { %0 = spv.IAdd %val, %val: i32 @@ -26,9 +24,7 @@ // CHECK: requires #spv.vce spv.module Logical GLSL450 attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { spv.func @group_non_uniform_ballot(%predicate : i1) -> vector<4xi32> "None" { %0 = spv.GroupNonUniformBallot "Workgroup" %predicate : vector<4xi32> @@ -45,9 +41,7 @@ // CHECK: requires #spv.vce spv.module Logical GLSL450 attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { spv.func @iadd(%val : i32) -> i32 "None" { %0 = spv.IAdd %val, %val: i32 @@ -61,9 +55,7 @@ // CHECK: requires #spv.vce spv.module Logical GLSL450 attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { spv.func @iadd(%val : i32) -> i32 "None" { %0 = spv.IAdd %val, 
%val: i32 @@ -84,9 +76,7 @@ // CHECK: requires #spv.vce spv.module Logical GLSL450 attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { spv.func @group_non_uniform_iadd(%val : i32) -> i32 "None" { %0 = spv.GroupNonUniformIAdd "Subgroup" "Reduce" %val : i32 @@ -97,9 +87,7 @@ // CHECK: requires #spv.vce spv.module Logical GLSL450 attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { spv.func @group_non_uniform_iadd(%val : i32) -> i32 "None" { %0 = spv.GroupNonUniformIAdd "Subgroup" "Reduce" %val : i32 @@ -113,9 +101,7 @@ // CHECK: requires #spv.vce spv.module Logical GLSL450 attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { spv.func @iadd_function(%val : i8) -> i8 "None" { %0 = spv.IAdd %val, %val : i8 @@ -127,9 +113,7 @@ // CHECK: requires #spv.vce spv.module Logical GLSL450 attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { spv.func @fadd_function(%val : f16) -> f16 "None" { %0 = spv.FAdd %val, %val : f16 @@ -148,9 +132,7 @@ spv.module Logical GLSL450 attributes { spv.target_env = #spv.target_env< #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + [SPV_KHR_shader_ballot, SPV_KHR_shader_clock, SPV_KHR_variable_pointers]>, {}> } { spv.func @subgroup_ballot(%predicate : i1) -> vector<4xi32> "None" { %0 = spv.SubgroupBallotKHR %predicate: vector<4xi32> @@ -165,9 +147,7 @@ // CHECK: requires #spv.vce 
spv.module Logical Vulkan attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { spv.func @iadd(%val : i32) -> i32 "None" { %0 = spv.IAdd %val, %val: i32 @@ -182,9 +162,7 @@ // CHECK: requires #spv.vce spv.module Logical GLSL450 attributes { spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { spv.func @iadd_storage_buffer(%ptr : !spv.ptr) -> i16 "None" { %0 = spv.Load "StorageBuffer" %ptr : i16 @@ -200,8 +178,7 @@ spv.module Logical GLSL450 attributes { spv.target_env = #spv.target_env< #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + {}> } { spv.globalVariable @data : !spv.ptr, Uniform> spv.globalVariable @img : !spv.ptr, UniformConstant> diff --git a/mlir/test/Dialect/SPIRV/target-and-abi.mlir b/mlir/test/Dialect/SPIRV/target-and-abi.mlir --- a/mlir/test/Dialect/SPIRV/target-and-abi.mlir +++ b/mlir/test/Dialect/SPIRV/target-and-abi.mlir @@ -104,15 +104,6 @@ // spv.target_env //===----------------------------------------------------------------------===// -func @target_env_missing_limits() attributes { - spv.target_env = #spv.target_env< - #spv.vce, - // expected-error @+1 {{limits must be a dictionary attribute containing two 32-bit integer attributes 'max_compute_workgroup_invocations' and 'max_compute_workgroup_size'}} - {max_compute_workgroup_size = dense<[128, 64, 64]> : vector<3xi32>}> -} { return } - -// ----- - func @target_env_wrong_limits() attributes { spv.target_env = #spv.target_env< #spv.vce, diff --git a/mlir/test/Dialect/SPIRV/target-env.mlir b/mlir/test/Dialect/SPIRV/target-env.mlir --- a/mlir/test/Dialect/SPIRV/target-env.mlir +++ b/mlir/test/Dialect/SPIRV/target-env.mlir @@ -35,7 +35,7 @@ 
// CHECK-LABEL: @cmp_exchange_weak_suitable_version_capabilities func @cmp_exchange_weak_suitable_version_capabilities(%ptr: !spv.ptr, %value: i32, %comparator: i32) -> i32 attributes { - spv.target_env = #spv.target_env<#spv.vce, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK: spv.AtomicCompareExchangeWeak "Workgroup" "AcquireRelease|AtomicCounterMemory" "Acquire" %0 = "test.convert_to_atomic_compare_exchange_weak_op"(%ptr, %value, %comparator): (!spv.ptr, i32, i32) -> (i32) @@ -44,7 +44,7 @@ // CHECK-LABEL: @cmp_exchange_weak_unsupported_version func @cmp_exchange_weak_unsupported_version(%ptr: !spv.ptr, %value: i32, %comparator: i32) -> i32 attributes { - spv.target_env = #spv.target_env<#spv.vce, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK: test.convert_to_atomic_compare_exchange_weak_op %0 = "test.convert_to_atomic_compare_exchange_weak_op"(%ptr, %value, %comparator): (!spv.ptr, i32, i32) -> (i32) @@ -57,7 +57,7 @@ // CHECK-LABEL: @group_non_uniform_ballot_suitable_version func @group_non_uniform_ballot_suitable_version(%predicate: i1) -> vector<4xi32> attributes { - spv.target_env = #spv.target_env<#spv.vce, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK: spv.GroupNonUniformBallot "Workgroup" %0 = "test.convert_to_group_non_uniform_ballot_op"(%predicate): (i1) -> (vector<4xi32>) @@ -66,7 +66,7 @@ // CHECK-LABEL: @group_non_uniform_ballot_unsupported_version func @group_non_uniform_ballot_unsupported_version(%predicate: i1) -> vector<4xi32> attributes { - spv.target_env = #spv.target_env<#spv.vce, {max_compute_workgroup_invocations = 128 : i32, 
max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK: test.convert_to_group_non_uniform_ballot_op %0 = "test.convert_to_group_non_uniform_ballot_op"(%predicate): (i1) -> (vector<4xi32>) @@ -79,7 +79,7 @@ // CHECK-LABEL: @cmp_exchange_weak_missing_capability_kernel func @cmp_exchange_weak_missing_capability_kernel(%ptr: !spv.ptr, %value: i32, %comparator: i32) -> i32 attributes { - spv.target_env = #spv.target_env<#spv.vce, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK: test.convert_to_atomic_compare_exchange_weak_op %0 = "test.convert_to_atomic_compare_exchange_weak_op"(%ptr, %value, %comparator): (!spv.ptr, i32, i32) -> (i32) @@ -88,7 +88,7 @@ // CHECK-LABEL: @cmp_exchange_weak_missing_capability_atomic_storage func @cmp_exchange_weak_missing_capability_atomic_storage(%ptr: !spv.ptr, %value: i32, %comparator: i32) -> i32 attributes { - spv.target_env = #spv.target_env<#spv.vce, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK: test.convert_to_atomic_compare_exchange_weak_op %0 = "test.convert_to_atomic_compare_exchange_weak_op"(%ptr, %value, %comparator): (!spv.ptr, i32, i32) -> (i32) @@ -97,7 +97,7 @@ // CHECK-LABEL: @subgroup_ballot_missing_capability func @subgroup_ballot_missing_capability(%predicate: i1) -> vector<4xi32> attributes { - spv.target_env = #spv.target_env<#spv.vce, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK: test.convert_to_subgroup_ballot_op %0 = "test.convert_to_subgroup_ballot_op"(%predicate): (i1) -> (vector<4xi32>) @@ -106,7 +106,7 @@ // CHECK-LABEL: 
@bit_reverse_directly_implied_capability func @bit_reverse_directly_implied_capability(%operand: i32) -> i32 attributes { - spv.target_env = #spv.target_env<#spv.vce, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK: spv.BitReverse %0 = "test.convert_to_bit_reverse_op"(%operand): (i32) -> (i32) @@ -115,7 +115,7 @@ // CHECK-LABEL: @bit_reverse_recursively_implied_capability func @bit_reverse_recursively_implied_capability(%operand: i32) -> i32 attributes { - spv.target_env = #spv.target_env<#spv.vce, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK: spv.BitReverse %0 = "test.convert_to_bit_reverse_op"(%operand): (i32) -> (i32) @@ -128,7 +128,7 @@ // CHECK-LABEL: @subgroup_ballot_suitable_extension func @subgroup_ballot_suitable_extension(%predicate: i1) -> vector<4xi32> attributes { - spv.target_env = #spv.target_env<#spv.vce, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK: spv.SubgroupBallotKHR %0 = "test.convert_to_subgroup_ballot_op"(%predicate): (i1) -> (vector<4xi32>) @@ -137,7 +137,7 @@ // CHECK-LABEL: @subgroup_ballot_missing_extension func @subgroup_ballot_missing_extension(%predicate: i1) -> vector<4xi32> attributes { - spv.target_env = #spv.target_env<#spv.vce, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK: test.convert_to_subgroup_ballot_op %0 = "test.convert_to_subgroup_ballot_op"(%predicate): (i1) -> (vector<4xi32>) @@ -146,7 +146,7 @@ // CHECK-LABEL: @module_suitable_extension1 func @module_suitable_extension1() attributes { - 
spv.target_env = #spv.target_env<#spv.vce, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK: spv.module PhysicalStorageBuffer64 Vulkan "test.convert_to_module_op"() : () ->() @@ -155,7 +155,7 @@ // CHECK-LABEL: @module_suitable_extension2 func @module_suitable_extension2() attributes { - spv.target_env = #spv.target_env<#spv.vce, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK: spv.module PhysicalStorageBuffer64 Vulkan "test.convert_to_module_op"() : () -> () @@ -164,7 +164,7 @@ // CHECK-LABEL: @module_missing_extension_mm func @module_missing_extension_mm() attributes { - spv.target_env = #spv.target_env<#spv.vce, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK: test.convert_to_module_op "test.convert_to_module_op"() : () -> () @@ -173,7 +173,7 @@ // CHECK-LABEL: @module_missing_extension_am func @module_missing_extension_am() attributes { - spv.target_env = #spv.target_env<#spv.vce, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK: test.convert_to_module_op "test.convert_to_module_op"() : () -> () @@ -183,7 +183,7 @@ // CHECK-LABEL: @module_implied_extension func @module_implied_extension() attributes { // Version 1.5 implies SPV_KHR_vulkan_memory_model and SPV_KHR_physical_storage_buffer. 
- spv.target_env = #spv.target_env<#spv.vce, {max_compute_workgroup_invocations = 128 : i32, max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + spv.target_env = #spv.target_env<#spv.vce, {}> } { // CHECK: spv.module PhysicalStorageBuffer64 Vulkan "test.convert_to_module_op"() : () -> () diff --git a/mlir/test/mlir-vulkan-runner/addf.mlir b/mlir/test/mlir-vulkan-runner/addf.mlir --- a/mlir/test/mlir-vulkan-runner/addf.mlir +++ b/mlir/test/mlir-vulkan-runner/addf.mlir @@ -4,9 +4,7 @@ module attributes { gpu.container_module, spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { gpu.module @kernels { gpu.func @kernel_add(%arg0 : memref<8xf32>, %arg1 : memref<8xf32>, %arg2 : memref<8xf32>) diff --git a/mlir/test/mlir-vulkan-runner/addi.mlir b/mlir/test/mlir-vulkan-runner/addi.mlir --- a/mlir/test/mlir-vulkan-runner/addi.mlir +++ b/mlir/test/mlir-vulkan-runner/addi.mlir @@ -4,9 +4,7 @@ module attributes { gpu.container_module, spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { gpu.module @kernels { gpu.func @kernel_addi(%arg0 : memref<8xi32>, %arg1 : memref<8x8xi32>, %arg2 : memref<8x8x8xi32>) diff --git a/mlir/test/mlir-vulkan-runner/addi8.mlir b/mlir/test/mlir-vulkan-runner/addi8.mlir --- a/mlir/test/mlir-vulkan-runner/addi8.mlir +++ b/mlir/test/mlir-vulkan-runner/addi8.mlir @@ -4,9 +4,7 @@ module attributes { gpu.container_module, spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { gpu.module @kernels { gpu.func @kernel_addi(%arg0 : memref<8xi8>, %arg1 : memref<8x8xi8>, %arg2 : memref<8x8x8xi32>) diff --git a/mlir/test/mlir-vulkan-runner/mulf.mlir 
b/mlir/test/mlir-vulkan-runner/mulf.mlir --- a/mlir/test/mlir-vulkan-runner/mulf.mlir +++ b/mlir/test/mlir-vulkan-runner/mulf.mlir @@ -4,9 +4,7 @@ module attributes { gpu.container_module, spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { gpu.module @kernels { gpu.func @kernel_mul(%arg0 : memref<4x4xf32>, %arg1 : memref<4x4xf32>, %arg2 : memref<4x4xf32>) diff --git a/mlir/test/mlir-vulkan-runner/subf.mlir b/mlir/test/mlir-vulkan-runner/subf.mlir --- a/mlir/test/mlir-vulkan-runner/subf.mlir +++ b/mlir/test/mlir-vulkan-runner/subf.mlir @@ -4,9 +4,7 @@ module attributes { gpu.container_module, spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { gpu.module @kernels { gpu.func @kernel_sub(%arg0 : memref<8x4x4xf32>, %arg1 : memref<4x4xf32>, %arg2 : memref<8x4x4xf32>) diff --git a/mlir/test/mlir-vulkan-runner/time.mlir b/mlir/test/mlir-vulkan-runner/time.mlir --- a/mlir/test/mlir-vulkan-runner/time.mlir +++ b/mlir/test/mlir-vulkan-runner/time.mlir @@ -7,9 +7,7 @@ module attributes { gpu.container_module, spv.target_env = #spv.target_env< - #spv.vce, - {max_compute_workgroup_invocations = 128 : i32, - max_compute_workgroup_size = dense<[128, 128, 64]> : vector<3xi32>}> + #spv.vce, {}> } { gpu.module @kernels { gpu.func @kernel_add(%arg0 : memref<16384xf32>, %arg1 : memref<16384xf32>, %arg2 : memref<16384xf32>)