diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp
@@ -625,6 +625,126 @@
   return success();
 }
 
+/// Match and rewrite SDDMM kernel.
+static LogicalResult rewriteSDDMM(PatternRewriter &rewriter,
+                                  linalg::GenericOp op, bool enableRT) {
+  Location loc = op.getLoc();
+  Value a = op.getOperand(1);
+  Value b = op.getOperand(2);
+  Value c = op.getOperand(0);
+  // D will store the result, i.e., C "spy" (A * B): the entries of the
+  // product A * B sampled by the sparsity pattern of C. At this point it
+  // holds freshly allocated memrefs with the same shape as C.
+  Value d = op.getOperand(3);
+  SmallVector<Value> tokens;
+
+  // Only admissible sparse matrix format and dense matrices.
+  bool isCOO = false;
+  SparseTensorType aTp = getSparseTensorType(a);
+  SparseTensorType bTp = getSparseTensorType(b);
+  SparseTensorType cTp = getSparseTensorType(c);
+  if (!areAdmissibleTypes(cTp, bTp, aTp, enableRT, isCOO))
+    return failure();
+
+  // The SDDMM operates in place. If the sparse matrix C is reused, e.g.,
+  // later on when we remove redundant memcpys, we may need to duplicate
+  // it before the operation so that users can use the new copy instead.
+  // Start sparse kernel and copy data from host to device.
+  //   a : bufA           -> matA
+  //   b : bufB           -> matB
+  //   c : memR/memC/memV -> rowC,colC,valC
+  Value nseC = rewriter.create<NumberOfEntriesOp>(loc, c);
+  Value szm = linalg::createOrFoldDimOp(rewriter, loc, a, 0);
+  Value szk = linalg::createOrFoldDimOp(rewriter, loc, a, 1);
+  Value szn = linalg::createOrFoldDimOp(rewriter, loc, b, 1);
+  Value bufA = genTensorToMemref(rewriter, loc, a);
+  Value matA = genAllocCopy(rewriter, loc, bufA, tokens);
+  Value bufB = genTensorToMemref(rewriter, loc, b);
+  Value matB = genAllocCopy(rewriter, loc, bufB, tokens);
+  Value memR = genFirstPosOrCrds(rewriter, loc, c, isCOO, enableRT);
+  Value memC = genSecondCrds(rewriter, loc, c, isCOO, enableRT);
+  Value memV = genToValues(rewriter, loc, c);
+  Value memR_dst = genFirstPosOrCrds(rewriter, loc, d, isCOO, enableRT);
+  Value memC_dst = genSecondCrds(rewriter, loc, d, isCOO, enableRT);
+  Value memV_dst = genToValues(rewriter, loc, d);
+  Value rowC = genAllocCopy(rewriter, loc, memR, tokens);
+  Value colC = memC ? genAllocCopy(rewriter, loc, memC, tokens) : Value();
+  Value valC = genAllocCopy(rewriter, loc, memV, tokens);
+  genBlockingWait(rewriter, loc, tokens);
+  tokens.clear();
+
+  // Create sparse environment and sparse matrix/dense matrix handles.
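+  // Note: everything below is chained through async tokens, so transfers
+  // and computation may overlap; on the library path these ops are
+  // expected to lower to the corresponding cuSPARSE calls (e.g.,
+  // cusparseCreateDnMat, cusparseSDDMM).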
+  Type indexTp = rewriter.getIndexType();
+  Type envHandleTp = rewriter.getType<gpu::SparseEnvHandleType>();
+  Type dnMatHandleTp = rewriter.getType<gpu::SparseDnMatHandleType>();
+  Type spMatHandleTp = rewriter.getType<gpu::SparseSpMatHandleType>();
+  Type tokenTp = rewriter.getType<gpu::AsyncTokenType>();
+  Value token = genFirstWait(rewriter, loc);
+  auto env =
+      rewriter.create<gpu::CreateSparseEnvOp>(loc, envHandleTp, tokenTp, token);
+  Value handle = env.getResult(0);
+  token = env.getAsyncToken();
+
+  auto dmatA = rewriter.create<gpu::CreateDnMatOp>(
+      loc, dnMatHandleTp, tokenTp, token, handle, szm, szk, matA);
+  Value dnA = dmatA.getResult(0);
+  token = dmatA.getAsyncToken();
+  auto dmatB = rewriter.create<gpu::CreateDnMatOp>(
+      loc, dnMatHandleTp, tokenTp, token, handle, szk, szn, matB);
+  Value dnB = dmatB.getResult(0);
+  token = dmatB.getAsyncToken();
+
+  Operation *spGenC =
+      genSpMat(rewriter, loc, spMatHandleTp, tokenTp, token, szm, szn, nseC,
+               rowC, colC, valC, isCOO, enableRT);
+  Value spMatC = spGenC->getResult(0);
+  token = spGenC->getResult(1);
+
+  auto dnCType = llvm::cast<ShapedType>(c.getType()).getElementType();
+
+  // Precompute buffer size for the SDDMM.
+  auto bufferComp = rewriter.create<gpu::SDDMMBufferSizeOp>(
+      loc, indexTp, tokenTp, token, handle, dnA, dnB, spMatC, dnCType);
+  Value bufferSz = bufferComp.getResult(0);
+  token = bufferComp.getAsyncToken();
+  auto buf = genAllocBuffer(rewriter, loc, bufferSz, token);
+  Value buffer = buf.getResult(0);
+  token = buf.getAsyncToken();
+
+  // Perform the SDDMM.
+  auto sddmmComp = rewriter.create<gpu::SDDMMOp>(
+      loc, tokenTp, token, handle, dnA, dnB, spMatC, dnCType, buffer);
+  token = sddmmComp.getAsyncToken();
+
+  // Copy data back to host and free all the resources.
+  token = rewriter.create<gpu::DestroyDnMatOp>(loc, tokenTp, token, dnA)
+              .getAsyncToken();
+  token = rewriter.create<gpu::DestroyDnMatOp>(loc, tokenTp, token, dnB)
+              .getAsyncToken();
+  token = rewriter.create<gpu::DestroySpMatOp>(loc, tokenTp, token, spMatC)
+              .getAsyncToken();
+  token = rewriter.create<gpu::DestroySparseEnvOp>(loc, tokenTp, token, handle)
+              .getAsyncToken();
+  tokens.push_back(token);
+  genBlockingWait(rewriter, loc, tokens);
+  tokens.clear();
+  token = genFirstWait(rewriter, loc);
+  token = genCopyMemRef(rewriter, loc, memR_dst, rowC, token);
+  token = genCopyMemRef(rewriter, loc, memC_dst, colC, token);
+  token = genCopyMemRef(rewriter, loc, memV_dst, valC, token);
+  token = genDeallocMemRef(rewriter, loc, buffer, token);
+  token = genDeallocMemRef(rewriter, loc, matA, token);
+  token = genDeallocMemRef(rewriter, loc, matB, token);
+  token = genDeallocMemRef(rewriter, loc, rowC, token);
+  if (colC)
+    token = genDeallocMemRef(rewriter, loc, colC, token);
+  token = genDeallocMemRef(rewriter, loc, valC, token);
+  tokens.push_back(token);
+  genBlockingWait(rewriter, loc, tokens);
+  tokens.clear();
+
+  // Done.
+  rewriter.replaceOp(op, op.getDpsInitOperand(0)->get());
+  return success();
+}
+
 //===----------------------------------------------------------------------===//
 // Rewriting rules for direct code generation.
 //===----------------------------------------------------------------------===//
@@ -776,6 +896,17 @@
     return rewriteSpMM(rewriter, op, enableRT);
   }
 
+  // Recognize an SDDMM kernel.
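+  // The matched form is X(i,j) += S(i,j) * SUM_k A(i,k) * B(k,j): three
+  // loops (two parallel, one reduction) over four tensors, where the
+  // sampling matrix S and the output X share the {i, j} indexing map.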
+  if (numLoops == 3 && numTensors == 4 &&
+      linalg::isParallelIterator(iteratorTypes[0]) &&
+      linalg::isParallelIterator(iteratorTypes[1]) &&
+      linalg::isReductionIterator(iteratorTypes[2]) &&
+      // TODO: add transposed {i, k}, {k, j}
+      // TODO: maybe add transposed {i, j} in future
+      maps == infer({{i, j}, {i, k}, {k, j}, {i, j}})) {
+    return rewriteSDDMM(rewriter, op, enableRT);
+  }
+
   return failure();
 }
diff --git a/mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir b/mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir
new file mode 100644
--- /dev/null
+++ b/mlir/test/Dialect/SparseTensor/GPU/gpu_sampled_matmul_lib.mlir
@@ -0,0 +1,110 @@
+// RUN: mlir-opt %s --linalg-generalize-named-ops \
+// RUN:             --sparsification="enable-gpu-libgen" | FileCheck %s
+
+#trait_sampled_dense_dense = {
+  indexing_maps = [
+    affine_map<(i,j,k) -> (i,j)>,  // S
+    affine_map<(i,j,k) -> (i,k)>,  // A
+    affine_map<(i,j,k) -> (k,j)>,  // B
+    affine_map<(i,j,k) -> (i,j)>   // X (out)
+  ],
+  iterator_types = ["parallel", "parallel", "reduction"],
+  doc = "X(i,j) += S(i,j) SUM_k A(i,k) B(k,j)"
+}
+
+#trait_vec_op = {
+  indexing_maps = [
+    affine_map<(i,j) -> (i,j)>,  // a (in)
+    affine_map<(i,j) -> (i,j)>,  // b (in)
+    affine_map<(i,j) -> (i,j)>   // x (out)
+  ],
+  iterator_types = ["parallel", "parallel"]
+}
+
+#CSR = #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>
+
+#SortedCOO = #sparse_tensor.encoding<{
+  lvlTypes = [ "compressed-nu", "singleton" ]
+}>
+
+module {
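+
+// The CHECK lines below verify the expected structure of the lowered code:
+// host-to-device copies of all operands, creation of the dense and sparse
+// matrix handles, a buffer-size query followed by the actual gpu.sddmm, and
+// the copy of the result pattern and values back to the host, all chained
+// through async tokens.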
+// CHECK-LABEL: func.func @sparse_sampled_dd(
+// CHECK-SAME:      %[[VAL_0:.*]]: tensor<8x8xf64>, %[[VAL_1:.*]]: tensor<8x8xf64>,
+// CHECK-SAME:      %[[VAL_2:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>>,
+// CHECK-SAME:      %[[VAL_3:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>> {
+// CHECK:           %[[VAL_4:.*]] = arith.constant 8 : index
+// CHECK:           %[[VAL_5:.*]] = arith.constant 0 : index
+// CHECK:           %[[VAL_6:.*]] = sparse_tensor.number_of_entries %[[VAL_2]] : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK:           %[[VAL_7:.*]] = bufferization.to_memref %[[VAL_0]] : memref<8x8xf64>
+// CHECK:           %[[VAL_8:.*]] = gpu.wait async
+// CHECK:           %[[VAL_9:.*]], %[[VAL_10:.*]] = gpu.alloc async {{\[}}%[[VAL_8]]] () : memref<8x8xf64>
+// CHECK:           %[[VAL_11:.*]] = gpu.memcpy async {{\[}}%[[VAL_10]]] %[[VAL_9]], %[[VAL_7]] : memref<8x8xf64>, memref<8x8xf64>
+// CHECK:           %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64>
+// CHECK:           %[[VAL_13:.*]] = gpu.wait async
+// CHECK:           %[[VAL_14:.*]], %[[VAL_15:.*]] = gpu.alloc async {{\[}}%[[VAL_13]]] () : memref<8x8xf64>
+// CHECK:           %[[VAL_16:.*]] = gpu.memcpy async {{\[}}%[[VAL_15]]] %[[VAL_14]], %[[VAL_12]] : memref<8x8xf64>, memref<8x8xf64>
+// CHECK:           %[[VAL_17:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK:           %[[VAL_18:.*]] = sparse_tensor.coordinates %[[VAL_2]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK:           %[[VAL_19:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
+// CHECK:           %[[VAL_20:.*]] = sparse_tensor.coordinates %[[VAL_3]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK:           %[[VAL_21:.*]] = sparse_tensor.coordinates %[[VAL_3]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xindex, strided<[?], offset: ?>>
+// CHECK:           %[[VAL_22:.*]] = sparse_tensor.values %[[VAL_3]] : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>> to memref<?xf64>
+// CHECK:           %[[VAL_23:.*]] = gpu.wait async
+// CHECK:           %[[VAL_24:.*]] = memref.dim %[[VAL_17]], %[[VAL_5]] : memref<?xindex, strided<[?], offset: ?>>
+// CHECK:           %[[VAL_25:.*]], %[[VAL_26:.*]] = gpu.alloc async {{\[}}%[[VAL_23]]] (%[[VAL_24]]) : memref<?xindex>
+// CHECK:           %[[VAL_27:.*]] = gpu.memcpy async {{\[}}%[[VAL_26]]] %[[VAL_25]], %[[VAL_17]] : memref<?xindex>, memref<?xindex, strided<[?], offset: ?>>
+// CHECK:           %[[VAL_28:.*]] = gpu.wait async
+// CHECK:           %[[VAL_29:.*]] = memref.dim %[[VAL_18]], %[[VAL_5]] : memref<?xindex, strided<[?], offset: ?>>
+// CHECK:           %[[VAL_30:.*]], %[[VAL_31:.*]] = gpu.alloc async {{\[}}%[[VAL_28]]] (%[[VAL_29]]) : memref<?xindex>
+// CHECK:           %[[VAL_32:.*]] = gpu.memcpy async {{\[}}%[[VAL_31]]] %[[VAL_30]], %[[VAL_18]] : memref<?xindex>, memref<?xindex, strided<[?], offset: ?>>
+// CHECK:           %[[VAL_33:.*]] = gpu.wait async
+// CHECK:           %[[VAL_34:.*]] = memref.dim %[[VAL_19]], %[[VAL_5]] : memref<?xf64>
+// CHECK:           %[[VAL_35:.*]], %[[VAL_36:.*]] = gpu.alloc async {{\[}}%[[VAL_33]]] (%[[VAL_34]]) : memref<?xf64>
+// CHECK:           %[[VAL_37:.*]] = gpu.memcpy async {{\[}}%[[VAL_36]]] %[[VAL_35]], %[[VAL_19]] : memref<?xf64>, memref<?xf64>
+// CHECK:           gpu.wait {{\[}}%[[VAL_11]], %[[VAL_16]], %[[VAL_27]], %[[VAL_32]], %[[VAL_37]]]
+// CHECK:           %[[VAL_38:.*]] = gpu.wait async
+// CHECK:           %[[VAL_39:.*]], %[[VAL_40:.*]] = gpu.create_sparse_env async {{\[}}%[[VAL_38]]]
+// CHECK:           %[[VAL_41:.*]], %[[VAL_42:.*]] = gpu.create_dn_mat async {{\[}}%[[VAL_40]]] %[[VAL_39]], %[[VAL_4]], %[[VAL_4]], %[[VAL_9]] : memref<8x8xf64>
+// CHECK:           %[[VAL_43:.*]], %[[VAL_44:.*]] = gpu.create_dn_mat async {{\[}}%[[VAL_42]]] %[[VAL_39]], %[[VAL_4]], %[[VAL_4]], %[[VAL_14]] : memref<8x8xf64>
+// CHECK:           %[[VAL_45:.*]], %[[VAL_46:.*]] = gpu.create_coo async {{\[}}%[[VAL_44]]] %[[VAL_4]], %[[VAL_4]], %[[VAL_6]], %[[VAL_25]], %[[VAL_30]], %[[VAL_35]] : memref<?xindex>, memref<?xindex>, memref<?xf64>
+// CHECK:           %[[VAL_47:.*]], %[[VAL_48:.*]] = gpu.sddmm_buffer_size async {{\[}}%[[VAL_46]]] %[[VAL_39]], %[[VAL_41]], %[[VAL_43]], %[[VAL_45]] into f64
+// CHECK:           %[[VAL_49:.*]], %[[VAL_50:.*]] = gpu.alloc async {{\[}}%[[VAL_48]]] (%[[VAL_47]]) : memref<?xi8>
+// CHECK:           %[[VAL_51:.*]] = gpu.sddmm async {{\[}}%[[VAL_50]]] %[[VAL_39]], %[[VAL_41]], %[[VAL_43]], %[[VAL_45]], %[[VAL_49]] : memref<?xi8> into f64
+// CHECK:           %[[VAL_52:.*]] = gpu.destroy_dn_mat async {{\[}}%[[VAL_51]]] %[[VAL_41]]
+// CHECK:           %[[VAL_53:.*]] = gpu.destroy_dn_mat async {{\[}}%[[VAL_52]]] %[[VAL_43]]
+// CHECK:           %[[VAL_54:.*]] = gpu.destroy_sp_mat async {{\[}}%[[VAL_53]]] %[[VAL_45]]
+// CHECK:           %[[VAL_55:.*]] = gpu.destroy_sparse_env async {{\[}}%[[VAL_54]]] %[[VAL_39]]
+// CHECK:           gpu.wait {{\[}}%[[VAL_55]]]
+// CHECK:           %[[VAL_56:.*]] = gpu.wait async
+// CHECK:           %[[VAL_57:.*]] = gpu.memcpy async {{\[}}%[[VAL_56]]] %[[VAL_20]], %[[VAL_25]] : memref<?xindex, strided<[?], offset: ?>>, memref<?xindex>
+// CHECK:           %[[VAL_58:.*]] = gpu.memcpy async {{\[}}%[[VAL_57]]] %[[VAL_21]], %[[VAL_30]] : memref<?xindex, strided<[?], offset: ?>>, memref<?xindex>
+// CHECK:           %[[VAL_59:.*]] = gpu.memcpy async {{\[}}%[[VAL_58]]] %[[VAL_22]], %[[VAL_35]] : memref<?xf64>, memref<?xf64>
+// CHECK:           %[[VAL_60:.*]] = gpu.dealloc async {{\[}}%[[VAL_59]]] %[[VAL_49]] : memref<?xi8>
+// CHECK:           %[[VAL_61:.*]] = gpu.dealloc async {{\[}}%[[VAL_60]]] %[[VAL_9]] : memref<8x8xf64>
+// CHECK:           %[[VAL_62:.*]] = gpu.dealloc async {{\[}}%[[VAL_61]]] %[[VAL_14]] : memref<8x8xf64>
+// CHECK:           %[[VAL_63:.*]] = gpu.dealloc async {{\[}}%[[VAL_62]]] %[[VAL_25]] : memref<?xindex>
+// CHECK:           %[[VAL_64:.*]] = gpu.dealloc async {{\[}}%[[VAL_63]]] %[[VAL_30]] : memref<?xindex>
+// CHECK:           %[[VAL_65:.*]] = gpu.dealloc async {{\[}}%[[VAL_64]]] %[[VAL_35]] : memref<?xf64>
+// CHECK:           gpu.wait {{\[}}%[[VAL_65]]]
+// CHECK:           return %[[VAL_3]] : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK:         }
+
+//
+// A kernel that computes a direct sampled matrix matrix multiplication
+// (with sparse result).
+// Compute SDDMM C = C "spy" (A * B), i.e., the sparsity pattern of the
+// sample matrix C (here %args, with the result accumulated into %argout)
+// selects which entries of the product A * B are computed.
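+// For example, if the sample matrix has its only nonzero at position (0, 1),
+// then only that single entry of the dense product is formed:
+//   x[0][1] += s[0][1] * SUM_k a[0][k] * b[k][1]
+// and every other entry of A * B is skipped.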
+func.func @sparse_sampled_dd(%arga: tensor<8x8xf64>,
+                             %argb: tensor<8x8xf64>,
+                             %args: tensor<8x8xf64, #SortedCOO>,
+                             %argout: tensor<8x8xf64, #SortedCOO>) -> tensor<8x8xf64, #SortedCOO> {
+  %0 = linalg.generic #trait_sampled_dense_dense
+    ins(%args, %arga, %argb: tensor<8x8xf64, #SortedCOO>, tensor<8x8xf64>, tensor<8x8xf64>)
+    outs(%argout: tensor<8x8xf64, #SortedCOO>) {
+      ^bb(%s: f64, %a: f64, %b: f64, %x: f64):
+        %p = arith.mulf %a, %b : f64
+        %q = arith.mulf %s, %p : f64
+        %r = arith.addf %x, %q : f64
+        linalg.yield %r : f64
+  } -> tensor<8x8xf64, #SortedCOO>
+  return %0 : tensor<8x8xf64, #SortedCOO>
+}
+
+}
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
@@ -59,17 +59,17 @@
   func.func @sampled_dense_dense(%args: tensor<?x?xf32, #SparseMatrix>,
                                  %arga: tensor<?x?xf32>,
                                  %argb: tensor<?x?xf32>,
-                                 %argx: tensor<?x?xf32>) -> tensor<?x?xf32> {
+                                 %argx: tensor<?x?xf32, #SparseMatrix>) -> tensor<?x?xf32, #SparseMatrix> {
     %0 = linalg.generic #trait_sampled_dense_dense
       ins(%args, %arga, %argb: tensor<?x?xf32, #SparseMatrix>,
                                tensor<?x?xf32>, tensor<?x?xf32>)
-      outs(%argx: tensor<?x?xf32>) {
+      outs(%argx: tensor<?x?xf32, #SparseMatrix>) {
       ^bb(%s: f32, %a: f32, %b: f32, %x: f32):
         %0 = arith.mulf %a, %b : f32
         %1 = arith.mulf %s, %0 : f32
         %2 = arith.addf %x, %1 : f32
         linalg.yield %2 : f32
-    } -> tensor<?x?xf32>
-    return %0 : tensor<?x?xf32>
+    } -> tensor<?x?xf32, #SparseMatrix>
+    return %0 : tensor<?x?xf32, #SparseMatrix>
   }
 
   func.func private @getTensorFilename(index) -> (!Filename)
@@ -85,11 +85,6 @@
     %c10 = arith.constant 10 : index
 
     // Initialize dense matrices.
-    %x = tensor.generate %c5, %c5 {
-    ^bb0(%i : index, %j : index):
-      tensor.yield %d0 : f32
-    } : tensor<?x?xf32>
-
     %a = tensor.generate %c5, %c10 {
     ^bb0(%i: index, %j: index):
       %p = arith.addi %i, %c1 : index
@@ -109,11 +104,13 @@
     // Read the sparse matrix from file, construct sparse storage.
     %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
    %s = sparse_tensor.new %fileName : !Filename to tensor<?x?xf32, #SparseMatrix>
+    %x = sparse_tensor.new %fileName : !Filename to tensor<?x?xf32, #SparseMatrix>
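+    // Note: the output matrix is read from the same file as the sample
+    // matrix, so it carries the sparsity pattern that the sampled kernel
+    // updates in place.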
 
     // Call the kernel.
     %0 = call @sampled_dense_dense(%s, %a, %b, %x)
-       : (tensor<?x?xf32, #SparseMatrix>,
-          tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>) -> tensor<?x?xf32>
+       : (tensor<?x?xf32, #SparseMatrix>,
+          tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32, #SparseMatrix>) -> tensor<?x?xf32, #SparseMatrix>
+    %1 = sparse_tensor.convert %0 : tensor<?x?xf32, #SparseMatrix> to tensor<?x?xf32>
 
     // Print the result for verification.
     //
@@ -124,7 +121,7 @@
     // CHECK: ( 0, 520, 0, 0, 1250 )
     //
     scf.for %i = %c0 to %c5 step %c1 {
-      %v = vector.transfer_read %0[%i, %c0], %d0: tensor<?x?xf32>, vector<5xf32>
+      %v = vector.transfer_read %1[%i, %c0], %d0: tensor<?x?xf32>, vector<5xf32>
       vector.print %v : vector<5xf32>
     }
 
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sampled-matmul-lib.mlir
copy from mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
copy to mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sampled-matmul-lib.mlir
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sampled-matmul-lib.mlir
@@ -1,32 +1,15 @@
-// DEFINE: %{option} = enable-runtime-library=true
-// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option}
-// DEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \
-// DEFINE: mlir-cpu-runner \
-// DEFINE:   -e entry -entry-point-result=void \
-// DEFINE:   -shared-libs=%mlir_c_runner_utils | \
-// DEFINE: FileCheck %s
 //
-// RUN: %{compile} | %{run}
+// NOTE: this test requires gpu-sm80
 //
-// Do the same run, but now with direct IR generation.
-// REDEFINE: %{option} = enable-runtime-library=false
-// RUN: %{compile} | %{run}
+// RUN: mlir-opt %s \
+// RUN:   --sparse-compiler="enable-runtime-library=true enable-gpu-libgen gpu-triple=nvptx64-nvidia-cuda gpu-chip=sm_80 gpu-features=+ptx71" \
+// RUN: | TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \
+// RUN:   mlir-cpu-runner \
+// RUN:   --shared-libs=%mlir_cuda_runtime \
+// RUN:   --shared-libs=%mlir_c_runner_utils \
+// RUN:   --e entry --entry-point-result=void \
+// RUN: | FileCheck %s
 //
-// Do the same run, but now with direct IR generation and vectorization.
-// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true"
-// RUN: %{compile} | %{run}
-
-// Do the same run, but now with direct IR generation and, if available, VLA
-// vectorization.
-// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA"
-// REDEFINE: %{run} = TENSOR0="%mlir_src_dir/test/Integration/data/test.mtx" \
-// REDEFINE: %lli_host_or_aarch64_cmd \
-// REDEFINE:   --entry-function=entry_lli \
-// REDEFINE:   --extra-module=%S/Inputs/main_for_lli.ll \
-// REDEFINE:   %VLA_ARCH_ATTR_OPTIONS \
-// REDEFINE:   --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \
-// REDEFINE: FileCheck %s
-// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run}
 
 !Filename = !llvm.ptr<i8>