diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h
--- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.h
@@ -190,6 +190,11 @@
   Value genAddress(OpBuilder &builder, Location loc, size_t tid, size_t dim,
                    Value iv);
 
+  /// Generates the segment high for a non-unique level (to fast forward
+  /// duplicated coordinates).
+  Value genSegmentHigh(OpBuilder &builder, Location loc, size_t tid, size_t lvl,
+                       Value pos, Value pHi);
+
   /// Generates instructions to compute the coordinate of tensors[tid][lvl]
   /// under the current loop context. The final argument is the
   /// collapsed-output level, whereas this function handles converting
@@ -262,6 +267,8 @@
   /// are updated to remain current within the current loop.
   // TODO: we may want to rename "pidx(s)" to `posCursor(s)` or similar.
   std::vector<std::vector<Value>> pidxs;
+  // The segment upper bound for non-unique levels after de-duplication.
+  std::vector<std::vector<Value>> segHi;
   std::vector<std::vector<Value>> coord;
   std::vector<std::vector<Value>> highs;
   std::vector<std::vector<Value>> lvlSizes;
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/LoopEmitter.cpp
@@ -127,6 +127,50 @@
   return add;
 }
 
+Value LoopEmitter::genSegmentHigh(OpBuilder &builder, Location loc, size_t tid,
+                                  size_t lvl, Value pos, Value pHi) {
+  Value prevCrd = genIndexLoad(builder, loc, crdBuffer[tid][lvl], pos);
+  // De-duplicates repeated elements.
+  //
+  // while (pos < pHi && coord[pos] == prev_coord)
+  //   pos++;
+  // return pos;
+  auto whileOp = builder.create<scf::WhileOp>(
+      loc, builder.getIndexType(), pos,
+      /*beforeBuilder=*/
+      [this, tid, lvl, pHi, prevCrd](OpBuilder &builder, Location loc,
+                                     ValueRange ivs) {
+        Value inBound = builder.create<arith::CmpIOp>(
+            loc, arith::CmpIPredicate::ult, ivs[0], pHi);
+        auto ifOp =
+            builder.create<scf::IfOp>(loc, builder.getI1Type(), inBound, true);
+        {
+          OpBuilder::InsertionGuard guard(builder);
+          // Load the next coordinate only when it is in bounds (to avoid OOB
+          // accesses).
+          builder.setInsertionPointToStart(ifOp.thenBlock());
+          Value nxCrd = genIndexLoad(builder, loc, crdBuffer[tid][lvl], ivs[0]);
+          Value cont = builder.create<arith::CmpIOp>(
+              loc, arith::CmpIPredicate::eq, nxCrd, prevCrd);
+          builder.create<scf::YieldOp>(loc, cont);
+          // Else, the position is out of bounds; yield false to terminate the
+          // loop.
+          builder.setInsertionPointToStart(ifOp.elseBlock());
+          builder.create<scf::YieldOp>(loc, constantI1(builder, loc, false));
+        }
+        builder.create<scf::ConditionOp>(loc, ifOp.getResults()[0], ivs);
+      },
+      /*afterBuilder=*/
+      [](OpBuilder &builder, Location loc, ValueRange ivs) {
+        // pos++
+        Value nxPos = builder.create<arith::AddIOp>(
+            loc, ivs[0], constantIndex(builder, loc, 1));
+        builder.create<scf::YieldOp>(loc, nxPos);
+      });
+  // Return the segment high.
+  return whileOp.getResult(0);
+}
+
 Value LoopEmitter::genSparseCrd(OpBuilder &builder, Location loc, size_t tid,
                                 size_t dstLvl) {
   Value crd = constantIndex(builder, loc, 0);
@@ -162,6 +206,7 @@
   this->isSparseSlices.assign(tensors.size(), false);
   this->dimTypes.assign(tensors.size(), std::vector<DimLevelType>());
   this->pidxs.assign(tensors.size(), std::vector<Value>());
+  this->segHi.assign(tensors.size(), std::vector<Value>());
   this->coord.assign(tensors.size(), std::vector<Value>());
   this->highs.assign(tensors.size(), std::vector<Value>());
   this->lvlSizes.assign(tensors.size(), std::vector<Value>());
@@ -202,6 +247,7 @@
     // Initialize using empty value.
     pidxs[tid].assign(rank, Value());
+    segHi[tid].assign(rank, Value());
     coord[tid].assign(rank, Value());
     highs[tid].assign(rank, Value());
     lvlSizes[tid].assign(rank, Value());
@@ -535,7 +581,9 @@
   SmallVector<Value> operands;
   // Construct the while-loop with a parameter for each coordinate.
   Type indexType = builder.getIndexType();
-  for (auto [tid, dim] : llvm::zip(tids, dims)) {
+  for (auto [t, d] : llvm::zip(tids, dims)) {
+    auto tid = t;
+    auto dim = d;
     if (isCompressedDLT(dimTypes[tid][dim]) ||
         isSingletonDLT(dimTypes[tid][dim])) {
       assert(pidxs[tid][dim]);
@@ -666,6 +714,13 @@
                   loopTag);
   assert(loopStack.size() == loopSeqStack.size());
 
+  for (auto [tid, dim] : llvm::zip(tids, dims)) {
+    if (!isUniqueDLT(dimTypes[tid][dim])) {
+      segHi[tid][dim] = genSegmentHigh(builder, loc, tid, dim, pidxs[tid][dim],
+                                       highs[tid][dim]);
+    }
+  }
+
   // Emits extra locals
   emitExtraLocalsForTensorsAtDenseDims(builder, loc, tids, dims);
 
@@ -703,8 +758,19 @@
   }
   if (isSingletonDLT(dimType)) {
     Value pLo = lvl == 0 ? c0 : pidxs[tid][lvl - 1];
-    Value pHi = builder.create<arith::AddIOp>(loc, pLo, c1);
-
+    Value pHi;
+    // If this level is non-unique, pHi is bounded by the segment high of the
+    // previous level.
+    if (!isUniqueDLT(dimTypes[tid][lvl - 1]))
+      pHi = segHi[tid][lvl - 1];
+
+    // If pHi is still uninitialized, set it to pLo + 1, as this is a
+    // singleton level.
+    // NOTE: Even if the level is non-unique, pHi might not have been set
+    // in the previous statement, as we only compute the segment high when we
+    // are coiterating non-unique levels.
+    if (!pHi)
+      pHi = builder.create<arith::AddIOp>(loc, pLo, c1);
     pidxs[tid][lvl] = pLo;
     highs[tid][lvl] = pHi;
     return;
@@ -721,7 +787,9 @@
   // Initialize dense positions. Note that we generate dense coordinates of the
   // output tensor unconditionally, since they may not appear in the lattice,
   // but may be needed for linearized codegen.
-  for (auto [tid, dim] : llvm::zip(tids, dims)) {
+  for (auto [t, d] : llvm::zip(tids, dims)) {
+    auto tid = t;
+    auto dim = d;
     if (isDenseDLT(dimTypes[tid][dim])) {
       auto enc = getSparseTensorEncoding(tensors[tid].getType());
       if (enc && !isSparseOutput(tid)) {
@@ -845,13 +913,20 @@
       Value op3 = pidxs[tid][dim];
       Value cmp = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq,
                                                 op1, iv);
-      Value add = builder.create<arith::AddIOp>(loc, op3, one);
+      // If the loop coiterates over a non-unique level, we fast forward all
+      // the duplicated coordinates by setting the position to the segment
+      // high.
+      Value add = !isUniqueDLT(dimTypes[tid][dim])
+                      ? segHi[tid][dim]
+                      : builder.create<arith::AddIOp>(loc, op3, one);
       operands.push_back(builder.create<arith::SelectOp>(loc, cmp, add, op3));
       // Following loops continue iteration from the break point of the
       // current while loop.
       pidxs[tid][dim] = whileOp->getResult(o++);
       // The coordinates are invalid now.
       coord[tid][dim] = nullptr;
+      // The segment high is invalid now.
+      segHi[tid][dim] = nullptr;
      // highs remains unchanged.
     }
   }
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
--- a/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp
@@ -1451,7 +1451,6 @@
   unsigned lts = env.merger().optimizeSet(env.merger().buildLattices(exp, idx));
 
   // TODO: sort
-  // TODO: dedup
 
   // Start a loop sequence.
bool needsUniv = startLoopSeq(env, rewriter, exp, at, idx, ldx, lts); diff --git a/mlir/test/Dialect/SparseTensor/sorted_coo.mlir b/mlir/test/Dialect/SparseTensor/sorted_coo.mlir --- a/mlir/test/Dialect/SparseTensor/sorted_coo.mlir +++ b/mlir/test/Dialect/SparseTensor/sorted_coo.mlir @@ -105,86 +105,115 @@ return %0 : tensor<32xf64> } -// CHECK-LABEL: func.func @mateltmul( -// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>>, -// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>>, -// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x64xf64>) -> tensor<32x64xf64> { -// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0.000000e+00 : f64 -// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index -// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index -// CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref -// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> -// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> -// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref -// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref -// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> -// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> -// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref -// CHECK-DAG: %[[VAL_14:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x64xf64> -// CHECK-DAG: linalg.fill ins(%[[VAL_3]] : f64) outs(%[[VAL_14]] : memref<32x64xf64>) -// CHECK-DAG: %[[VAL_15:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref -// CHECK-DAG: %[[VAL_16:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref -// CHECK-DAG: %[[VAL_17:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_4]]] : memref -// CHECK-DAG: %[[VAL_18:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_5]]] : memref -// CHECK: %[[VAL_19:.*]]:2 = scf.while (%[[VAL_20:.*]] = %[[VAL_15]], %[[VAL_21:.*]] = %[[VAL_17]]) : (index, index) -> (index, index) { -// CHECK: %[[VAL_22:.*]] = arith.cmpi ult, %[[VAL_20]], %[[VAL_16]] : index -// CHECK: %[[VAL_23:.*]] = arith.cmpi ult, %[[VAL_21]], %[[VAL_18]] : index -// CHECK: %[[VAL_24:.*]] = arith.andi %[[VAL_22]], %[[VAL_23]] : i1 -// CHECK: scf.condition(%[[VAL_24]]) %[[VAL_20]], %[[VAL_21]] : index, index -// CHECK: } do { -// CHECK: ^bb0(%[[VAL_25:.*]]: index, %[[VAL_26:.*]]: index): -// CHECK: %[[VAL_27:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_25]]] : memref> -// CHECK: %[[VAL_28:.*]] = memref.load 
%[[VAL_11]]{{\[}}%[[VAL_26]]] : memref> -// CHECK: %[[VAL_29:.*]] = arith.cmpi ult, %[[VAL_28]], %[[VAL_27]] : index -// CHECK: %[[VAL_30:.*]] = arith.select %[[VAL_29]], %[[VAL_28]], %[[VAL_27]] : index -// CHECK: %[[VAL_31:.*]] = arith.cmpi eq, %[[VAL_27]], %[[VAL_30]] : index -// CHECK: %[[VAL_32:.*]] = arith.cmpi eq, %[[VAL_28]], %[[VAL_30]] : index -// CHECK: %[[VAL_33:.*]] = arith.andi %[[VAL_31]], %[[VAL_32]] : i1 -// CHECK: scf.if %[[VAL_33]] { -// CHECK: %[[VAL_34:.*]] = arith.addi %[[VAL_25]], %[[VAL_5]] : index -// CHECK: %[[VAL_35:.*]] = arith.addi %[[VAL_26]], %[[VAL_5]] : index -// CHECK: %[[VAL_36:.*]]:2 = scf.while (%[[VAL_37:.*]] = %[[VAL_25]], %[[VAL_38:.*]] = %[[VAL_26]]) : (index, index) -> (index, index) { -// CHECK: %[[VAL_39:.*]] = arith.cmpi ult, %[[VAL_37]], %[[VAL_34]] : index -// CHECK: %[[VAL_40:.*]] = arith.cmpi ult, %[[VAL_38]], %[[VAL_35]] : index -// CHECK: %[[VAL_41:.*]] = arith.andi %[[VAL_39]], %[[VAL_40]] : i1 -// CHECK: scf.condition(%[[VAL_41]]) %[[VAL_37]], %[[VAL_38]] : index, index +// CHECK-LABEL: func.func @mateltmul( +// CHECK-SAME: %[[VAL_0:.*0]]: tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>>, +// CHECK-SAME: %[[VAL_1:.*1]]: tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>>, +// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x64xf64>) -> tensor<32x64xf64> { +// CHECK-DAG: %[[VAL_3:.*]] = arith.constant false +// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0.000000e+00 : f64 +// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 0 : index +// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index +// CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref +// CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> +// CHECK-DAG: %[[VAL_9:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> +// CHECK-DAG: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref +// CHECK-DAG: %[[VAL_11:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref +// CHECK-DAG: %[[VAL_12:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 0 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> +// CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 1 : index} : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref> +// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed-nu", "singleton" ] }>> to memref +// CHECK: %[[VAL_15:.*]] = bufferization.to_memref %[[VAL_2]] : memref<32x64xf64> +// CHECK: linalg.fill ins(%[[VAL_4]] : f64) outs(%[[VAL_15]] : memref<32x64xf64>) +// CHECK: %[[VAL_16:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_5]]] : memref +// CHECK: %[[VAL_17:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref +// CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_5]]] : 
memref +// CHECK: %[[VAL_19:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_6]]] : memref +// CHECK: %[[VAL_20:.*]]:2 = scf.while (%[[VAL_21:.*]] = %[[VAL_16]], %[[VAL_22:.*]] = %[[VAL_18]]) : (index, index) -> (index, index) { +// CHECK: %[[VAL_23:.*]] = arith.cmpi ult, %[[VAL_21]], %[[VAL_17]] : index +// CHECK: %[[VAL_24:.*]] = arith.cmpi ult, %[[VAL_22]], %[[VAL_19]] : index +// CHECK: %[[VAL_25:.*]] = arith.andi %[[VAL_23]], %[[VAL_24]] : i1 +// CHECK: scf.condition(%[[VAL_25]]) %[[VAL_21]], %[[VAL_22]] : index, index +// CHECK: } do { +// CHECK: ^bb0(%[[VAL_26:.*]]: index, %[[VAL_27:.*]]: index): +// CHECK: %[[VAL_28:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_26]]] : memref> +// CHECK: %[[VAL_29:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_27]]] : memref> +// CHECK: %[[VAL_30:.*]] = arith.cmpi ult, %[[VAL_29]], %[[VAL_28]] : index +// CHECK: %[[VAL_31:.*]] = arith.select %[[VAL_30]], %[[VAL_29]], %[[VAL_28]] : index +// CHECK: %[[VAL_32:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_26]]] : memref> +// CHECK: %[[VAL_33:.*]] = scf.while (%[[VAL_34:.*]] = %[[VAL_26]]) : (index) -> index { +// CHECK: %[[VAL_35:.*]] = arith.cmpi ult, %[[VAL_34]], %[[VAL_17]] : index +// CHECK: %[[VAL_36:.*]] = scf.if %[[VAL_35]] -> (i1) { +// CHECK: %[[VAL_37:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_34]]] : memref> +// CHECK: %[[VAL_38:.*]] = arith.cmpi eq, %[[VAL_37]], %[[VAL_32]] : index +// CHECK: scf.yield %[[VAL_38]] : i1 +// CHECK: } else { +// CHECK: scf.yield %[[VAL_3]] : i1 +// CHECK: } +// CHECK: scf.condition(%[[VAL_39:.*]]) %[[VAL_34]] : index // CHECK: } do { -// CHECK: ^bb0(%[[VAL_42:.*]]: index, %[[VAL_43:.*]]: index): -// CHECK: %[[VAL_44:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_42]]] : memref> -// CHECK: %[[VAL_45:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_43]]] : memref> -// CHECK: %[[VAL_46:.*]] = arith.cmpi ult, %[[VAL_45]], %[[VAL_44]] : index -// CHECK: %[[VAL_47:.*]] = arith.select %[[VAL_46]], %[[VAL_45]], %[[VAL_44]] : index -// CHECK: %[[VAL_48:.*]] = arith.cmpi eq, %[[VAL_44]], %[[VAL_47]] : index -// CHECK: %[[VAL_49:.*]] = arith.cmpi eq, %[[VAL_45]], %[[VAL_47]] : index -// CHECK: %[[VAL_50:.*]] = arith.andi %[[VAL_48]], %[[VAL_49]] : i1 -// CHECK: scf.if %[[VAL_50]] { -// CHECK: %[[VAL_51:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_42]]] : memref -// CHECK: %[[VAL_52:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_43]]] : memref -// CHECK: %[[VAL_53:.*]] = arith.mulf %[[VAL_51]], %[[VAL_52]] : f64 -// CHECK: memref.store %[[VAL_53]], %[[VAL_14]]{{\[}}%[[VAL_30]], %[[VAL_47]]] : memref<32x64xf64> +// CHECK: ^bb0(%[[VAL_40:.*]]: index): +// CHECK: %[[VAL_41:.*]] = arith.addi %[[VAL_40]], %[[VAL_6]] : index +// CHECK: scf.yield %[[VAL_41]] : index +// CHECK: } +// CHECK: %[[VAL_42:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_27]]] : memref> +// CHECK: %[[VAL_43:.*]] = scf.while (%[[VAL_44:.*]] = %[[VAL_27]]) : (index) -> index { +// CHECK: %[[VAL_45:.*]] = arith.cmpi ult, %[[VAL_44]], %[[VAL_19]] : index +// CHECK: %[[VAL_46:.*]] = scf.if %[[VAL_45]] -> (i1) { +// CHECK: %[[VAL_47:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_44]]] : memref> +// CHECK: %[[VAL_48:.*]] = arith.cmpi eq, %[[VAL_47]], %[[VAL_42]] : index +// CHECK: scf.yield %[[VAL_48]] : i1 // CHECK: } else { +// CHECK: scf.yield %[[VAL_3]] : i1 // CHECK: } -// CHECK: %[[VAL_54:.*]] = arith.cmpi eq, %[[VAL_44]], %[[VAL_47]] : index -// CHECK: %[[VAL_55:.*]] = arith.addi %[[VAL_42]], %[[VAL_5]] : index -// CHECK: %[[VAL_56:.*]] = arith.select %[[VAL_54]], %[[VAL_55]], %[[VAL_42]] : index -// CHECK: %[[VAL_57:.*]] = arith.cmpi eq, 
%[[VAL_45]], %[[VAL_47]] : index -// CHECK: %[[VAL_58:.*]] = arith.addi %[[VAL_43]], %[[VAL_5]] : index -// CHECK: %[[VAL_59:.*]] = arith.select %[[VAL_57]], %[[VAL_58]], %[[VAL_43]] : index -// CHECK: scf.yield %[[VAL_56]], %[[VAL_59]] : index, index +// CHECK: scf.condition(%[[VAL_49:.*]]) %[[VAL_44]] : index +// CHECK: } do { +// CHECK: ^bb0(%[[VAL_50:.*]]: index): +// CHECK: %[[VAL_51:.*]] = arith.addi %[[VAL_50]], %[[VAL_6]] : index +// CHECK: scf.yield %[[VAL_51]] : index +// CHECK: } +// CHECK: %[[VAL_52:.*]] = arith.cmpi eq, %[[VAL_28]], %[[VAL_31]] : index +// CHECK: %[[VAL_53:.*]] = arith.cmpi eq, %[[VAL_29]], %[[VAL_31]] : index +// CHECK: %[[VAL_54:.*]] = arith.andi %[[VAL_52]], %[[VAL_53]] : i1 +// CHECK: scf.if %[[VAL_54]] { +// CHECK: %[[VAL_55:.*]]:2 = scf.while (%[[VAL_56:.*]] = %[[VAL_26]], %[[VAL_57:.*]] = %[[VAL_27]]) : (index, index) -> (index, index) { +// CHECK: %[[VAL_58:.*]] = arith.cmpi ult, %[[VAL_56]], %[[VAL_59:.*]] : index +// CHECK: %[[VAL_60:.*]] = arith.cmpi ult, %[[VAL_57]], %[[VAL_61:.*]] : index +// CHECK: %[[VAL_62:.*]] = arith.andi %[[VAL_58]], %[[VAL_60]] : i1 +// CHECK: scf.condition(%[[VAL_62]]) %[[VAL_56]], %[[VAL_57]] : index, index +// CHECK: } do { +// CHECK: ^bb0(%[[VAL_63:.*]]: index, %[[VAL_64:.*]]: index): +// CHECK: %[[VAL_65:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_63]]] : memref> +// CHECK: %[[VAL_66:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_64]]] : memref> +// CHECK: %[[VAL_67:.*]] = arith.cmpi ult, %[[VAL_66]], %[[VAL_65]] : index +// CHECK: %[[VAL_68:.*]] = arith.select %[[VAL_67]], %[[VAL_66]], %[[VAL_65]] : index +// CHECK: %[[VAL_69:.*]] = arith.cmpi eq, %[[VAL_65]], %[[VAL_68]] : index +// CHECK: %[[VAL_70:.*]] = arith.cmpi eq, %[[VAL_66]], %[[VAL_68]] : index +// CHECK: %[[VAL_71:.*]] = arith.andi %[[VAL_69]], %[[VAL_70]] : i1 +// CHECK: scf.if %[[VAL_71]] { +// CHECK: %[[VAL_72:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_63]]] : memref +// CHECK: %[[VAL_73:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_64]]] : memref +// CHECK: %[[VAL_74:.*]] = arith.mulf %[[VAL_72]], %[[VAL_73]] : f64 +// CHECK: memref.store %[[VAL_74]], %[[VAL_15]]{{\[}}%[[VAL_31]], %[[VAL_68]]] : memref<32x64xf64> +// CHECK: } else { +// CHECK: } +// CHECK: %[[VAL_75:.*]] = arith.cmpi eq, %[[VAL_65]], %[[VAL_68]] : index +// CHECK: %[[VAL_76:.*]] = arith.addi %[[VAL_63]], %[[VAL_6]] : index +// CHECK: %[[VAL_77:.*]] = arith.select %[[VAL_75]], %[[VAL_76]], %[[VAL_63]] : index +// CHECK: %[[VAL_78:.*]] = arith.cmpi eq, %[[VAL_66]], %[[VAL_68]] : index +// CHECK: %[[VAL_79:.*]] = arith.addi %[[VAL_64]], %[[VAL_6]] : index +// CHECK: %[[VAL_80:.*]] = arith.select %[[VAL_78]], %[[VAL_79]], %[[VAL_64]] : index +// CHECK: scf.yield %[[VAL_77]], %[[VAL_80]] : index, index +// CHECK: } attributes {"Emitted from" = "linalg.generic"} +// CHECK: } else { // CHECK: } -// CHECK: } else { -// CHECK: } -// CHECK: %[[VAL_60:.*]] = arith.cmpi eq, %[[VAL_27]], %[[VAL_30]] : index -// CHECK: %[[VAL_61:.*]] = arith.addi %[[VAL_25]], %[[VAL_5]] : index -// CHECK: %[[VAL_62:.*]] = arith.select %[[VAL_60]], %[[VAL_61]], %[[VAL_25]] : index -// CHECK: %[[VAL_63:.*]] = arith.cmpi eq, %[[VAL_28]], %[[VAL_30]] : index -// CHECK: %[[VAL_64:.*]] = arith.addi %[[VAL_26]], %[[VAL_5]] : index -// CHECK: %[[VAL_65:.*]] = arith.select %[[VAL_63]], %[[VAL_64]], %[[VAL_26]] : index -// CHECK: scf.yield %[[VAL_62]], %[[VAL_65]] : index, index +// CHECK: %[[VAL_81:.*]] = arith.cmpi eq, %[[VAL_28]], %[[VAL_31]] : index +// CHECK: %[[VAL_82:.*]] = arith.select %[[VAL_81]], %[[VAL_83:.*]], %[[VAL_26]] : 
index +// CHECK: %[[VAL_84:.*]] = arith.cmpi eq, %[[VAL_29]], %[[VAL_31]] : index +// CHECK: %[[VAL_85:.*]] = arith.select %[[VAL_84]], %[[VAL_86:.*]], %[[VAL_27]] : index +// CHECK: scf.yield %[[VAL_82]], %[[VAL_85]] : index, index +// CHECK: } attributes {"Emitted from" = "linalg.generic"} +// CHECK: %[[VAL_87:.*]] = bufferization.to_tensor %[[VAL_15]] : memref<32x64xf64> +// CHECK: return %[[VAL_87]] : tensor<32x64xf64> // CHECK: } -// CHECK: %[[VAL_66:.*]] = bufferization.to_tensor %[[VAL_14]] : memref<32x64xf64> -// CHECK: return %[[VAL_66]] : tensor<32x64xf64> -// CHECK: } func.func @mateltmul(%argx: tensor<32x64xf64, #SortedCOO>, %argy: tensor<32x64xf64, #SortedCOO>, %argz: tensor<32x64xf64>) -> tensor<32x64xf64> { diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir new file mode 100644 --- /dev/null +++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_coo_test.mlir @@ -0,0 +1,181 @@ +// DEFINE: %{option} = enable-runtime-library=true +// DEFINE: %{compile} = mlir-opt %s --sparse-compiler=%{option} +// DEFINE: %{run} = mlir-cpu-runner \ +// DEFINE: -e entry -entry-point-result=void \ +// DEFINE: -shared-libs=%mlir_c_runner_utils | \ +// DEFINE: FileCheck %s +// +// RUN: %{compile} | %{run} +// +// Do the same run, but now with direct IR generation. +// REDEFINE: %{option} = enable-runtime-library=false +// RUN: %{compile} | %{run} +// +// Do the same run, but now with direct IR generation and vectorization. +// REDEFINE: %{option} = "enable-runtime-library=false vl=2 reassociate-fp-reductions=true enable-index-optimizations=true" +// RUN: %{compile} | %{run} +// +// Do the same run, but now with direct IR generation and, if available, VLA +// vectorization. 
+// REDEFINE: %{option} = "enable-runtime-library=false vl=4 enable-arm-sve=%ENABLE_VLA" +// REDEFINE: %{run} = %lli \ +// REDEFINE: --entry-function=entry_lli \ +// REDEFINE: --extra-module=%S/Inputs/main_for_lli.ll \ +// REDEFINE: %VLA_ARCH_ATTR_OPTIONS \ +// REDEFINE: --dlopen=%mlir_native_utils_lib_dir/libmlir_c_runner_utils%shlibext | \ +// REDEFINE: FileCheck %s +// RUN: %{compile} | mlir-translate -mlir-to-llvmir | %{run} + +#SortedCOO = #sparse_tensor.encoding<{ + dimLevelType = [ "compressed-nu", "singleton" ] +}> + +#CSR = #sparse_tensor.encoding<{ + dimLevelType = [ "dense", "compressed" ] +}> + +#trait = { + indexing_maps = [ + affine_map<(i,j) -> (i,j)>, // A + affine_map<(i,j) -> (i,j)>, // B + affine_map<(i,j) -> (i,j)> // X (out) + ], + iterator_types = ["parallel", "parallel"], + doc = "X(i,j) = A(i,j) + B(i,j)" +} + +module { + func.func @add_coo_csr(%arga: tensor<8x8xf32, #CSR>, + %argb: tensor<8x8xf32, #SortedCOO>) + -> tensor<8x8xf32> { + %empty = tensor.empty() : tensor<8x8xf32> + %zero = arith.constant 0.000000e+00 : f32 + %init = linalg.fill + ins(%zero : f32) + outs(%empty : tensor<8x8xf32>) -> tensor<8x8xf32> + %0 = linalg.generic #trait + ins(%arga, %argb: tensor<8x8xf32, #CSR>, + tensor<8x8xf32, #SortedCOO>) + outs(%init: tensor<8x8xf32>) { + ^bb(%a: f32, %b: f32, %x: f32): + %0 = arith.addf %a, %b : f32 + linalg.yield %0 : f32 + } -> tensor<8x8xf32> + return %0 : tensor<8x8xf32> + } + + func.func @add_coo_coo(%arga: tensor<8x8xf32, #SortedCOO>, + %argb: tensor<8x8xf32, #SortedCOO>) + -> tensor<8x8xf32> { + %empty = tensor.empty() : tensor<8x8xf32> + %zero = arith.constant 0.000000e+00 : f32 + %init = linalg.fill + ins(%zero : f32) + outs(%empty : tensor<8x8xf32>) -> tensor<8x8xf32> + %0 = linalg.generic #trait + ins(%arga, %argb: tensor<8x8xf32, #SortedCOO>, + tensor<8x8xf32, #SortedCOO>) + outs(%init: tensor<8x8xf32>) { + ^bb(%a: f32, %b: f32, %x: f32): + %0 = arith.addf %a, %b : f32 + linalg.yield %0 : f32 + } -> tensor<8x8xf32> + return %0 : tensor<8x8xf32> + } + + func.func @add_coo_dense(%arga: tensor<8x8xf32>, + %argb: tensor<8x8xf32, #SortedCOO>) + -> tensor<8x8xf32> { + %empty = tensor.empty() : tensor<8x8xf32> + %zero = arith.constant 0.000000e+00 : f32 + %init = linalg.fill + ins(%zero : f32) + outs(%empty : tensor<8x8xf32>) -> tensor<8x8xf32> + %0 = linalg.generic #trait + ins(%arga, %argb: tensor<8x8xf32>, + tensor<8x8xf32, #SortedCOO>) + outs(%init: tensor<8x8xf32>) { + ^bb(%a: f32, %b: f32, %x: f32): + %0 = arith.addf %a, %b : f32 + linalg.yield %0 : f32 + } -> tensor<8x8xf32> + return %0 : tensor<8x8xf32> + } + + func.func @entry() { + %c0 = arith.constant 0 : index + %c1 = arith.constant 1 : index + %c8 = arith.constant 8 : index + + %A = arith.constant dense< + [ [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0 ], + [ 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 8.1 ], + [ 2.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2, 8.2 ], + [ 3.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3, 8.3 ], + [ 4.4, 2.4, 3.4, 4.4, 5.4, 6.4, 7.4, 8.4 ], + [ 5.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5 ], + [ 6.6, 2.6, 3.6, 4.6, 5.6, 6.6, 7.6, 8.6 ], + [ 7.7, 2.7, 3.7, 4.7, 5.7, 6.7, 7.7, 8.7 ] ] + > : tensor<8x8xf32> + %B = arith.constant dense< + [ [ 7.8, 2.8, 3.8, 0.8, 3.8, 0.1, 7.8, 8.8 ], + [ 3.3, 2.3, 1.3, 4.3, 3.3, 6.3, 9.3, 8.3 ], + [ 6.6, 2.6, 3.6, 4.6, 3.6, 6.6, 7.6, 7.6 ], + [ 1.0, 3.0, 3.0, 4.0, 3.0, 6.0, 7.0, 8.0 ], + [ 0.1, 2.1, 3.1, 4.1, 3.1, 6.1, 7.1, 8.1 ], + [ 4.4, 2.4, 3.4, 4.4, 3.4, 6.4, 8.4, 8.4 ], + [ 5.5, 3.5, 1.5, 4.5, 3.5, 6.5, 7.5, 8.5 ], + [ 7.7, 2.7, 3.7, 0.7, 5.7, 3.7, 3.7, 0.7 ] ] + > : 
tensor<8x8xf32> + + // Stress test with a "sparse" version of A and B. + %CSR_A = sparse_tensor.convert %A + : tensor<8x8xf32> to tensor<8x8xf32, #CSR> + %COO_A = sparse_tensor.convert %A + : tensor<8x8xf32> to tensor<8x8xf32, #SortedCOO> + %COO_B = sparse_tensor.convert %B + : tensor<8x8xf32> to tensor<8x8xf32, #SortedCOO> + + %C1 = call @add_coo_dense(%A, %COO_B) : (tensor<8x8xf32>, + tensor<8x8xf32, #SortedCOO>) + -> tensor<8x8xf32> + %C2 = call @add_coo_csr(%CSR_A, %COO_B) : (tensor<8x8xf32, #CSR>, + tensor<8x8xf32, #SortedCOO>) + -> tensor<8x8xf32> + %C3 = call @add_coo_coo(%COO_A, %COO_B) : (tensor<8x8xf32, #SortedCOO>, + tensor<8x8xf32, #SortedCOO>) + -> tensor<8x8xf32> + // + // Verify computed matrix C. + // + // CHECK-COUNT-3: ( 8.8, 4.8, 6.8, 4.8, 8.8, 6.1, 14.8, 16.8 ) + // CHECK-NEXT-COUNT-3: ( 4.4, 4.4, 4.4, 8.4, 8.4, 12.4, 16.4, 16.4 ) + // CHECK-NEXT-COUNT-3: ( 8.8, 4.8, 6.8, 8.8, 8.8, 12.8, 14.8, 15.8 ) + // CHECK-NEXT-COUNT-3: ( 4.3, 5.3, 6.3, 8.3, 8.3, 12.3, 14.3, 16.3 ) + // CHECK-NEXT-COUNT-3: ( 4.5, 4.5, 6.5, 8.5, 8.5, 12.5, 14.5, 16.5 ) + // CHECK-NEXT-COUNT-3: ( 9.9, 4.9, 6.9, 8.9, 8.9, 12.9, 15.9, 16.9 ) + // CHECK-NEXT-COUNT-3: ( 12.1, 6.1, 5.1, 9.1, 9.1, 13.1, 15.1, 17.1 ) + // CHECK-NEXT-COUNT-3: ( 15.4, 5.4, 7.4, 5.4, 11.4, 10.4, 11.4, 9.4 ) + // + %f0 = arith.constant 0.0 : f32 + scf.for %i = %c0 to %c8 step %c1 { + %v1 = vector.transfer_read %C1[%i, %c0], %f0 + : tensor<8x8xf32>, vector<8xf32> + %v2 = vector.transfer_read %C2[%i, %c0], %f0 + : tensor<8x8xf32>, vector<8xf32> + %v3 = vector.transfer_read %C3[%i, %c0], %f0 + : tensor<8x8xf32>, vector<8xf32> + vector.print %v1 : vector<8xf32> + vector.print %v2 : vector<8xf32> + vector.print %v3 : vector<8xf32> + } + + // Release resources. + bufferization.dealloc_tensor %CSR_A : tensor<8x8xf32, #CSR> + bufferization.dealloc_tensor %COO_A : tensor<8x8xf32, #SortedCOO> + bufferization.dealloc_tensor %COO_B : tensor<8x8xf32, #SortedCOO> + + + return + } +} \ No newline at end of file
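
The de-duplication that genSegmentHigh lowers to an scf.while loop boils down to a linear scan over one level's coordinate array. The standalone C++ sketch below only illustrates that semantics and how the coiteration loops use the segment high to fast-forward duplicated coordinates; the names (segmentHigh, rowCrd) and the flat coordinate vector are assumptions made for this example and do not correspond to LoopEmitter's actual buffers or the MLIR builder API.

#include <cstdint>
#include <iostream>
#include <vector>

// Returns the first position in [pos, pHi) whose coordinate differs from
// crd[pos], i.e. the exclusive upper bound of the current duplicate segment.
static size_t segmentHigh(const std::vector<uint64_t> &crd, size_t pos,
                          size_t pHi) {
  const uint64_t prevCrd = crd[pos];
  while (pos < pHi && crd[pos] == prevCrd)
    ++pos;
  return pos;
}

int main() {
  // Level-0 coordinates of a sorted 2-D COO tensor; rows 1 and 3 each store
  // several entries, so the level is "compressed-nu" (non-unique).
  std::vector<uint64_t> rowCrd = {1, 1, 1, 3, 3, 5};
  size_t pos = 0, pHi = rowCrd.size();
  // Walk the level one duplicate segment (i.e. one row) at a time, which is
  // how the emitted while-loops skip over duplicated coordinates.
  while (pos < pHi) {
    size_t segHi = segmentHigh(rowCrd, pos, pHi);
    std::cout << "row " << rowCrd[pos] << " spans positions [" << pos << ", "
              << segHi << ")\n";
    pos = segHi; // Jump directly to the next unique coordinate.
  }
  return 0;
}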