diff --git a/mlir/include/mlir/Dialect/SparseTensor/Pipelines/Passes.h b/mlir/include/mlir/Dialect/SparseTensor/Pipelines/Passes.h
--- a/mlir/include/mlir/Dialect/SparseTensor/Pipelines/Passes.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/Pipelines/Passes.h
@@ -41,12 +41,16 @@
   PassOptions::Option<bool> enableSIMDIndex32{
       *this, "enable-simd-index32",
       desc("Enable i32 indexing into vectors (for efficiency)"), init(false)};
+  PassOptions::Option<bool> enableVLAVectorization{
+      *this, "enable-vla-vectorization",
+      desc("Enable vector length agnostic vectorization"), init(false)};

   /// Projects out the options for `createSparsificationPass`.
   SparsificationOptions sparsificationOptions() const {
     return SparsificationOptions(sparseParallelizationStrategy(parallelization),
                                  sparseVectorizationStrategy(vectorization),
-                                 vectorLength, enableSIMDIndex32);
+                                 vectorLength, enableSIMDIndex32,
+                                 enableVLAVectorization);
   }

   // These options must be kept in sync with `ConvertVectorToLLVMBase`.
diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
--- a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.h
@@ -54,16 +54,19 @@
 /// Sparsification options.
 struct SparsificationOptions {
   SparsificationOptions(SparseParallelizationStrategy p,
-                        SparseVectorizationStrategy v, unsigned vl, bool e)
+                        SparseVectorizationStrategy v, unsigned vl, bool e,
+                        bool vla)
       : parallelizationStrategy(p), vectorizationStrategy(v), vectorLength(vl),
-        enableSIMDIndex32(e) {}
+        enableSIMDIndex32(e), enableVLAVectorization(vla) {}
   SparsificationOptions()
       : SparsificationOptions(SparseParallelizationStrategy::kNone,
-                              SparseVectorizationStrategy::kNone, 1u, false) {}
+                              SparseVectorizationStrategy::kNone, 1u, false,
+                              false) {}
   SparseParallelizationStrategy parallelizationStrategy;
   SparseVectorizationStrategy vectorizationStrategy;
   unsigned vectorLength;
   bool enableSIMDIndex32;
+  bool enableVLAVectorization;
 };

 /// Sets up sparsification rewriting rules with the given options.
diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
--- a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Passes.td
@@ -70,7 +70,9 @@
     Option<"vectorLength", "vl", "int32_t", "1",
            "Set the vector length">,
     Option<"enableSIMDIndex32", "enable-simd-index32", "bool", "false",
-           "Enable i32 indexing into vectors (for efficiency)">
+           "Enable i32 indexing into vectors (for efficiency)">,
+    Option<"enableVLAVectorization", "enable-vla-vectorization", "bool",
+           "false", "Enable vector length agnostic vectorization">
   ];
 }

diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorPasses.cpp
@@ -39,6 +39,7 @@
     vectorization = static_cast<int32_t>(options.vectorizationStrategy);
     vectorLength = options.vectorLength;
     enableSIMDIndex32 = options.enableSIMDIndex32;
+    enableVLAVectorization = options.enableVLAVectorization;
   }

   void runOnOperation() override {
@@ -48,7 +49,7 @@
     SparsificationOptions options(
         sparseParallelizationStrategy(parallelization),
         sparseVectorizationStrategy(vectorization), vectorLength,
-        enableSIMDIndex32);
+        enableSIMDIndex32, enableVLAVectorization);
     // Apply rewriting.
     populateSparsificationPatterns(patterns, options);
     vector::populateVectorToVectorCanonicalizationPatterns(patterns);
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -56,7 +56,7 @@
         idxs(numTensors, std::vector<Value>(numLoops)), redExp(-1u), redVal(),
         redKind(kNoReduc), sparseOut(op), outerParNest(nest), lexIdx(),
         expValues(), expFilled(), expAdded(), expCount(), curVecLength(1),
-        curVecMask() {}
+        curVecMask(), generateVLA(false) {}
   /// Sparsification options.
   SparsificationOptions options;
   /// Universal dense indices and upper bounds (by index). The loops array
@@ -95,6 +95,8 @@
   // Current vector length and mask.
   unsigned curVecLength;
   Value curVecMask;
+  // Generate vector code in a vector length agnostic way.
+  bool generateVLA;
 };

 } // namespace
@@ -554,7 +556,7 @@
 /// Constructs vector type.
 static VectorType vectorType(CodeGen &codegen, Type etp) {
-  return VectorType::get(codegen.curVecLength, etp);
+  return VectorType::get(codegen.curVecLength, etp, codegen.generateVLA);
 }

 /// Constructs vector type from pointer.
@@ -1119,14 +1121,21 @@
       isParallelFor(codegen, isOuter, isReduction, isSparse, isVector);

   // Prepare vector length.
-  if (isVector)
+  if (isVector) {
     codegen.curVecLength = codegen.options.vectorLength;
+    codegen.generateVLA = codegen.options.enableVLAVectorization;
+  }

   // Loop bounds and increment.
   Location loc = op.getLoc();
   Value lo = isSparse ? codegen.pidxs[tensor][idx] : codegen.loops[idx];
   Value hi = isSparse ? codegen.highs[tensor][idx] : codegen.sizes[idx];
   Value step = constantIndex(rewriter, loc, codegen.curVecLength);
+  if (codegen.generateVLA) {
+    Value vscale = rewriter.create<vector::VectorScaleOp>(
+        loc, IndexType::get(rewriter.getContext()));
+    step = rewriter.create<arith::MulIOp>(loc, vscale, step);
+  }

   // Emit a parallel loop.
   if (isParallel) {
diff --git a/mlir/test/Dialect/SparseTensor/sparse_vector.mlir b/mlir/test/Dialect/SparseTensor/sparse_vector.mlir
--- a/mlir/test/Dialect/SparseTensor/sparse_vector.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_vector.mlir
@@ -6,6 +6,8 @@
 // RUN:   FileCheck %s --check-prefix=CHECK-VEC2
 // RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=16 enable-simd-index32=true" -cse -split-input-file | \
 // RUN:   FileCheck %s --check-prefix=CHECK-VEC3
+// RUN: mlir-opt %s -sparsification="vectorization-strategy=2 vl=4 enable-vla-vectorization=true" -cse -split-input-file | \
+// RUN:   FileCheck %s --check-prefix=CHECK-VEC4

 #DenseVector = #sparse_tensor.encoding<{ dimLevelType = [ "dense" ] }>

@@ -54,6 +56,24 @@
 // CHECK-VEC2: }
 // CHECK-VEC2: return
 //
+// CHECK-VEC4: #[[$map:.*]] = affine_map<(d0, d1)[s0] -> (s0, d0 - d1)
+// CHECK-VEC4-LABEL: func @scale_d
+// CHECK-VEC4-DAG: %[[c0:.*]] = arith.constant 0 : index
+// CHECK-VEC4-DAG: %[[c4:.*]] = arith.constant 4 : index
+// CHECK-VEC4-DAG: %[[c1024:.*]] = arith.constant 1024 : index
+// CHECK-VEC4-DAG: %[[v0:.*]] = arith.constant dense<0.000000e+00> : vector<[4]xf32>
+// CHECK-VEC4-DAG: %[[vscale:.*]] = vector.vscale
+// CHECK-VEC4: %[[step:.*]] = arith.muli %[[vscale]], %[[c4]] : index
+// CHECK-VEC4: scf.for %[[i:.*]] = %[[c0]] to %[[c1024]] step %[[step]] {
+// CHECK-VEC4: %[[sub:.*]] = affine.min #[[$map]](%[[c1024]], %[[i]])[%[[step]]]
+// CHECK-VEC4: %[[mask:.*]] = vector.create_mask %[[sub]] : vector<[4]xi1>
+// CHECK-VEC4: %[[val:.*]] = vector.maskedload %{{.*}}[%[[i]]], %[[mask]], %[[v0]] : memref<?xf32>, vector<[4]xi1>, vector<[4]xf32> into vector<[4]xf32>
+// CHECK-VEC4: %[[scalev:.*]] = vector.broadcast %{{.*}} : f32 to vector<[4]xf32>
+// CHECK-VEC4: %[[scaled:.*]] = arith.mulf %[[val]], %[[scalev]] : vector<[4]xf32>
+// CHECK-VEC4: vector.maskedstore %{{.*}}[%[[i]]], %[[mask]], %[[scaled]] : memref<1024xf32>, vector<[4]xi1>, vector<[4]xf32>
+// CHECK-VEC4: }
+// CHECK-VEC4: return
+//
 func @scale_d(%arga: tensor<1024xf32, #DenseVector>, %b: f32, %argx: tensor<1024xf32>) -> tensor<1024xf32> {
   %0 = linalg.generic #trait_scale_d
     ins(%arga: tensor<1024xf32, #DenseVector>)
@@ -169,6 +189,33 @@
 // CHECK-VEC3: }
 // CHECK-VEC3: return
 //
+// CHECK-VEC4: #[[$map:.*]] = affine_map<(d0, d1)[s0] -> (s0, d0 - d1)
+// CHECK-VEC4-LABEL: func @mul_s
+// CHECK-VEC4-DAG: %[[c0:.*]] = arith.constant 0 : index
+// CHECK-VEC4-DAG: %[[c1:.*]] = arith.constant 1 : index
+// CHECK-VEC4-DAG: %[[c4:.*]] = arith.constant 4 : index
+// CHECK-VEC4-DAG: %[[v0i:.*]] = arith.constant dense<0> : vector<[4]xi32>
+// CHECK-VEC4-DAG: %[[v0f:.*]] = arith.constant dense<0.000000e+00> : vector<[4]xf32>
+// CHECK-VEC4: %[[p:.*]] = memref.load %{{.*}}[%[[c0]]] : memref<?xi32>
+// CHECK-VEC4: %[[a:.*]] = arith.extui %[[p]] : i32 to i64
+// CHECK-VEC4: %[[q:.*]] = arith.index_cast %[[a]] : i64 to index
+// CHECK-VEC4: %[[r:.*]] = memref.load %{{.*}}[%[[c1]]] : memref<?xi32>
+// CHECK-VEC4: %[[b:.*]] = arith.extui %[[r]] : i32 to i64
+// CHECK-VEC4: %[[s:.*]] = arith.index_cast %[[b]] : i64 to index
+// CHECK-VEC4: %[[vscale:.*]] = vector.vscale
+// CHECK-VEC4: %[[step:.*]] = arith.muli %[[vscale]], %[[c4]] : index
+// CHECK-VEC4: scf.for %[[i:.*]] = %[[q]] to %[[s]] step %[[step]] {
+// CHECK-VEC4: %[[sub:.*]] = affine.min #[[$map]](%[[s]], %[[i]])[%[[step]]]
+// CHECK-VEC4: %[[mask:.*]] = vector.create_mask %[[sub]] : vector<[4]xi1>
+// CHECK-VEC4: %[[li:.*]] = vector.maskedload %{{.*}}[%[[i]]], %[[mask]], %[[v0i]] : memref<?xi32>, vector<[4]xi1>, vector<[4]xi32> into vector<[4]xi32>
+// CHECK-VEC4: %[[lii64:.*]] = arith.extui %[[li]] : vector<[4]xi32> to vector<[4]xi64>
+// CHECK-VEC4: %[[la:.*]] = vector.maskedload %{{.*}}[%[[i]]], %[[mask]], %[[v0f]] : memref<?xf32>, vector<[4]xi1>, vector<[4]xf32> into vector<[4]xf32>
+// CHECK-VEC4: %[[lb:.*]] = vector.gather %{{.*}}[%[[c0]]] [%[[lii64]]], %[[mask]], %[[v0f]] : memref<1024xf32>, vector<[4]xi64>, vector<[4]xi1>, vector<[4]xf32> into vector<[4]xf32>
+// CHECK-VEC4: %[[m:.*]] = arith.mulf %[[la]], %[[lb]] : vector<[4]xf32>
+// CHECK-VEC4: vector.scatter %{{.*}}[%[[c0]]] [%[[lii64]]], %[[mask]], %[[m]] : memref<1024xf32>, vector<[4]xi64>, vector<[4]xi1>, vector<[4]xf32>
+// CHECK-VEC4: }
+// CHECK-VEC4: return
+//
 func @mul_s(%arga: tensor<1024xf32, #SparseVector>, %argb: tensor<1024xf32>, %argx: tensor<1024xf32>) -> tensor<1024xf32> {
   %0 = linalg.generic #trait_mul_s
     ins(%arga, %argb: tensor<1024xf32, #SparseVector>, tensor<1024xf32>)
@@ -242,6 +289,29 @@
 // CHECK-VEC2: %{{.*}} = vector.reduction "add", %[[red]] : vector<16xf32> into f32
 // CHECK-VEC2: return
 //
+// CHECK-VEC4: #[[$map:.*]] = affine_map<(d0, d1)[s0] -> (s0, d0 - d1)
+// CHECK-VEC4-LABEL: func @reduction_d
+// CHECK-VEC4-DAG: %[[c0:.*]] = arith.constant 0 : index
+// CHECK-VEC4-DAG: %[[c4:.*]] = arith.constant 4 : index
+// CHECK-VEC4-DAG: %[[c1024:.*]] = arith.constant 1024 : index
+// CHECK-VEC4-DAG: %[[v0:.*]] = arith.constant dense<0.000000e+00> : vector<[4]xf32>
+// CHECK-VEC4: %[[l:.*]] = memref.load %{{.*}}[] : memref<f32>
+// CHECK-VEC4: %[[vscale:.*]] = vector.vscale
+// CHECK-VEC4: %[[step:.*]] = arith.muli %[[vscale]], %[[c4]] : index
+// CHECK-VEC4: %[[r:.*]] = vector.insertelement %[[l]], %[[v0]][%[[c0]] : index] : vector<[4]xf32>
+// CHECK-VEC4: %[[red:.*]] = scf.for %[[i:.*]] = %[[c0]] to %[[c1024]] step %[[step]] iter_args(%[[red_in:.*]] = %[[r]]) -> (vector<[4]xf32>) {
+// CHECK-VEC4: %[[sub:.*]] = affine.min #[[$map]](%[[c1024]], %[[i]])[%[[step]]]
+// CHECK-VEC4: %[[mask:.*]] = vector.create_mask %[[sub]] : vector<[4]xi1>
+// CHECK-VEC4: %[[la:.*]] = vector.maskedload %{{.*}}[%[[i]]], %[[mask]], %[[v0]] : memref<?xf32>, vector<[4]xi1>, vector<[4]xf32> into vector<[4]xf32>
+// CHECK-VEC4: %[[lb:.*]] = vector.maskedload %{{.*}}[%[[i]]], %[[mask]], %[[v0]] : memref<1024xf32>, vector<[4]xi1>, vector<[4]xf32> into vector<[4]xf32>
+// CHECK-VEC4: %[[m:.*]] = arith.mulf %[[la]], %[[lb]] : vector<[4]xf32>
+// CHECK-VEC4: %[[a:.*]] = arith.addf %[[red_in]], %[[m]] : vector<[4]xf32>
+// CHECK-VEC4: %[[sa:.*]] = arith.select %[[mask]], %[[a]], %[[red_in]] : vector<[4]xi1>, vector<[4]xf32>
+// CHECK-VEC4: scf.yield %[[sa]] : vector<[4]xf32>
+// CHECK-VEC4: }
+// CHECK-VEC4: %{{.*}} = vector.reduction "add", %[[red]] : vector<[4]xf32> into f32
+// CHECK-VEC4: return
+//
 func @reduction_d(%arga: tensor<1024xf32, #DenseVector>, %argb: tensor<1024xf32>, %argx: tensor<f32>) -> tensor<f32> {
   %0 = linalg.generic #trait_reduction_d
     ins(%arga, %argb: tensor<1024xf32, #DenseVector>, tensor<1024xf32>)
@@ -374,6 +444,37 @@
 // CHECK-VEC3: }
 // CHECK-VEC3: return
 //
+// CHECK-VEC4: #[[$map:.*]] = affine_map<(d0, d1)[s0] -> (s0, d0 - d1)
+// CHECK-VEC4-LABEL: func @mul_ds
+// CHECK-VEC4-DAG: %[[c0:.*]] = arith.constant 0 : index
+// CHECK-VEC4-DAG: %[[c1:.*]] = arith.constant 1 : index
+// CHECK-VEC4-DAG: %[[c4:.*]] = arith.constant 4 : index
+// CHECK-VEC4-DAG: %[[c512:.*]] = arith.constant 512 : index
+// CHECK-VEC4-DAG: %[[v0i:.*]] = arith.constant dense<0> : vector<[4]xi32>
+// CHECK-VEC4-DAG: %[[v0f:.*]] = arith.constant dense<0.000000e+00> : vector<[4]xf32>
+// CHECK-VEC4: scf.for %[[i:.*]] = %[[c0]] to %[[c512]] step %[[c1]] {
+// CHECK-VEC4: %[[p:.*]] = memref.load %{{.*}}[%[[i]]] : memref<?xi32>
+// CHECK-VEC4: %[[a:.*]] = arith.extui %[[p]] : i32 to i64
+// CHECK-VEC4: %[[q:.*]] = arith.index_cast %[[a]] : i64 to index
+// CHECK-VEC4: %[[a:.*]] = arith.addi %[[i]], %[[c1]] : index
+// CHECK-VEC4: %[[r:.*]] = memref.load %{{.*}}[%[[a]]] : memref<?xi32>
+// CHECK-VEC4: %[[b:.*]] = arith.extui %[[r]] : i32 to i64
+// CHECK-VEC4: %[[s:.*]] = arith.index_cast %[[b]] : i64 to index
+// CHECK-VEC4: %[[vscale:.*]] = vector.vscale
+// CHECK-VEC4: %[[step:.*]] = arith.muli %[[vscale]], %[[c4]] : index
+// CHECK-VEC4: scf.for %[[j:.*]] = %[[q]] to %[[s]] step %[[step]] {
+// CHECK-VEC4: %[[sub:.*]] = affine.min #[[$map]](%[[s]], %[[j]])[%[[step]]]
+// CHECK-VEC4: %[[mask:.*]] = vector.create_mask %[[sub]] : vector<[4]xi1>
+// CHECK-VEC4: %[[lji32:.*]] = vector.maskedload %{{.*}}[%[[j]]], %[[mask]], %[[v0i]] : memref<?xi32>, vector<[4]xi1>, vector<[4]xi32> into vector<[4]xi32>
+// CHECK-VEC4: %[[lj:.*]] = arith.extui %[[lji32]] : vector<[4]xi32> to vector<[4]xi64>
+// CHECK-VEC4: %[[la:.*]] = vector.maskedload %{{.*}}[%[[j]]], %[[mask]], %[[v0f]] : memref<?xf32>, vector<[4]xi1>, vector<[4]xf32> into vector<[4]xf32>
+// CHECK-VEC4: %[[lb:.*]] = vector.gather %{{.*}}[%[[i]], %[[c0]]] [%[[lj]]], %[[mask]], %[[v0f]] : memref<512x1024xf32>, vector<[4]xi64>, vector<[4]xi1>, vector<[4]xf32> into vector<[4]xf32>
+// CHECK-VEC4: %[[m:.*]] = arith.mulf %[[la]], %[[lb]] : vector<[4]xf32>
+// CHECK-VEC4: vector.scatter %{{.*}}[%[[i]], %[[c0]]] [%[[lj]]], %[[mask]], %[[m]] : memref<512x1024xf32>, vector<[4]xi64>, vector<[4]xi1>, vector<[4]xf32>
+// CHECK-VEC4: }
+// CHECK-VEC4: }
+// CHECK-VEC4: return
+//
 func @mul_ds(%arga: tensor<512x1024xf32, #SparseMatrix>, %argb: tensor<512x1024xf32>, %argx: tensor<512x1024xf32>) -> tensor<512x1024xf32> {
   %0 = linalg.generic #trait_mul_ds
     ins(%arga, %argb: tensor<512x1024xf32, #SparseMatrix>, tensor<512x1024xf32>)
@@ -457,6 +558,32 @@
 // CHECK-VEC2: }
 // CHECK-VEC2: return
 //
+// CHECK-VEC4: #[[$map:.*]] = affine_map<(d0, d1)[s0] -> (s0, d0 - d1)
+// CHECK-VEC4-LABEL: func @add_dense
+// CHECK-VEC4-DAG: %[[c0:.*]] = arith.constant 0 : index
+// CHECK-VEC4-DAG: %[[c1:.*]] = arith.constant 1 : index
+// CHECK-VEC4-DAG: %[[c4:.*]] = arith.constant 4 : index
+// CHECK-VEC4-DAG: %[[c32:.*]] = arith.constant 32 : index
+// CHECK-VEC4-DAG: %[[v0idx:.*]] = arith.constant dense<0> : vector<[4]xindex>
+// CHECK-VEC4-DAG: %[[v0f64:.*]] = arith.constant dense<0.000000e+00> : vector<[4]xf64>
+// CHECK-VEC4: scf.for %[[i:.*]] = %[[c0]] to %[[c32]] step %[[c1]] {
+// CHECK-VEC4: %[[lo:.*]] = memref.load %{{.*}}[%[[i]]] : memref<?xindex>
+// CHECK-VEC4: %[[i1:.*]] = arith.addi %[[i]], %[[c1]] : index
+// CHECK-VEC4: %[[hi:.*]] = memref.load %{{.*}}[%[[i1]]] : memref<?xindex>
+// CHECK-VEC4: %[[vscale:.*]] = vector.vscale
+// CHECK-VEC4: %[[step:.*]] = arith.muli %[[vscale]], %[[c4]] : index
+// CHECK-VEC4: scf.for %[[jj:.*]] = %[[lo]] to %[[hi]] step %[[step]] {
+// CHECK-VEC4: %[[sub:.*]] = affine.min #[[$map]](%[[hi]], %[[jj]])[%[[step]]]
+// CHECK-VEC4: %[[mask:.*]] = vector.create_mask %[[sub]] : vector<[4]xi1>
+// CHECK-VEC4: %[[j:.*]] = vector.maskedload %{{.*}}[%[[jj]]], %[[mask]], %[[v0idx]] : memref<?xindex>
+// CHECK-VEC4: %[[x:.*]] = vector.gather %{{.*}}[%[[i1]], %[[c0]]] [%[[j]]], %[[mask]], %[[v0f64]] : memref<33x64xf64>
+// CHECK-VEC4: %[[a:.*]] = vector.maskedload %{{.*}}[%[[jj]]], %[[mask]], %[[v0f64]] : memref<?xf64>
+// CHECK-VEC4: %[[s:.*]] = arith.addf %[[x]], %[[a]] : vector<[4]xf64>
+// CHECK-VEC4: vector.scatter %{{.*}}[%[[i1]], %[[c0]]] [%[[j]]], %[[mask]], %[[s]] : memref<33x64xf64>
+// CHECK-VEC4: }
+// CHECK-VEC4: }
+// CHECK-VEC4: return
+//
 func @add_dense(%arga: tensor<32x64xf64, #SparseMatrix>, %argx: tensor<33x64xf64> {linalg.inplaceable = true}) -> tensor<33x64xf64> {
   %0 = linalg.generic #trait_affine
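
The snippet below is a minimal sketch, not part of the patch, of how a downstream pipeline might enable the new VLA mode programmatically; it mirrors the -sparsification="vectorization-strategy=2 vl=4 enable-vla-vectorization=true" invocation added to the RUN lines above. Only the extended SparsificationOptions constructor and populateSparsificationPatterns come from this change; the SparseVectorizationStrategy::kAnyStorageInnerLoop enumerator (the strategy selected by vectorization-strategy=2) is assumed to pre-exist.

    // Sketch only: build sparsification patterns with VLA vectorization enabled.
    #include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
    #include "mlir/IR/PatternMatch.h"

    void addVLASparsificationPatterns(mlir::RewritePatternSet &patterns) {
      using namespace mlir;
      // vl=4 combined with the VLA flag produces scalable vector types such as
      // vector<[4]xf32>, so the loop step becomes vector.vscale * 4 at runtime.
      SparsificationOptions options(
          SparseParallelizationStrategy::kNone,
          SparseVectorizationStrategy::kAnyStorageInnerLoop, // assumed enumerator
          /*vl=*/4, /*enableSIMDIndex32=*/false, /*vla=*/true);
      populateSparsificationPatterns(patterns, options);
    }

With vl=4 and the flag unset, codegen keeps emitting fixed vector<4x...> types as before; setting the flag only switches the generated types to their scalable counterparts and scales the loop step by vector.vscale.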