diff --git a/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir b/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir
--- a/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir
+++ b/mlir/test/Dialect/SparseTensor/vectorize_reduction.mlir
@@ -434,3 +434,444 @@
 }
 
 // -----
+
+// From this point forward, we essentially have the same test for each
+// arithmetic operation. These tests are here for code coverage purposes.
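+//
+// Each test below exercises the same reduction kernel twice. With
+// vectorization enabled (CHECK-ON), the loop operates on vector<8xi32> or
+// vector<8xf32> chunks: a masked load, the vectorized arithmetic operation,
+// and an arith.select that keeps the previous accumulator in the masked-off
+// lanes, followed by one final horizontal vector.reduction. With
+// vectorization disabled (CHECK-OFF), the same kernel remains a scalar
+// scf.for loop.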
+
+// Check that we vectorize xor.
+// CHECK-ON-LABEL: func.func @sparse_reduction_xor(
+// CHECK-ON-SAME: %[[VAL_0:.*]]: tensor<i32>,
+// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<i32> {
+// CHECK-ON: %[[VAL_2:.*]] = arith.constant 8 : index
+// CHECK-ON: %[[VAL_3:.*]] = arith.constant dense<0> : vector<8xi32>
+// CHECK-ON: %[[VAL_4:.*]] = arith.constant 0 : index
+// CHECK-ON: %[[VAL_5:.*]] = arith.constant 1 : index
+// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xi32>
+// CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
+// CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref<i32>
+// CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK-ON: %[[VAL_12:.*]] = vector.insertelement %[[VAL_9]], %[[VAL_3]]{{\[}}%[[VAL_4]] : index] : vector<8xi32>
+// CHECK-ON: %[[VAL_13:.*]] = scf.for %[[VAL_14:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_2]] iter_args(%[[VAL_15:.*]] = %[[VAL_12]]) -> (vector<8xi32>) {
+// CHECK-ON: %[[VAL_16:.*]] = affine.min #map(%[[VAL_11]], %[[VAL_14]]){{\[}}%[[VAL_2]]]
+// CHECK-ON: %[[VAL_17:.*]] = vector.create_mask %[[VAL_16]] : vector<8xi1>
+// CHECK-ON: %[[VAL_18:.*]] = vector.maskedload %[[VAL_7]]{{\[}}%[[VAL_14]]], %[[VAL_17]], %[[VAL_3]] : memref<?xi32>, vector<8xi1>, vector<8xi32> into vector<8xi32>
+// CHECK-ON: %[[VAL_19:.*]] = arith.xori %[[VAL_15]], %[[VAL_18]] : vector<8xi32>
+// CHECK-ON: %[[VAL_20:.*]] = arith.select %[[VAL_17]], %[[VAL_19]], %[[VAL_15]] : vector<8xi1>, vector<8xi32>
+// CHECK-ON: scf.yield %[[VAL_20]] : vector<8xi32>
+// CHECK-ON: } {"Emitted from" = "linalg.generic"}
+// CHECK-ON: %[[VAL_21:.*]] = vector.reduction <xor>, %[[VAL_22:.*]] : vector<8xi32> into i32
+// CHECK-ON: memref.store %[[VAL_21]], %[[VAL_8]][] : memref<i32>
+// CHECK-ON: %[[VAL_23:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<i32>
+// CHECK-ON: return %[[VAL_23]] : tensor<i32>
+// CHECK-ON: }
+//
+// CHECK-OFF-LABEL: func.func @sparse_reduction_xor(
+// CHECK-OFF-SAME: %[[VAL_0:.*]]: tensor<i32>,
+// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<i32> {
+// CHECK-OFF: %[[VAL_2:.*]] = arith.constant 0 : index
+// CHECK-OFF: %[[VAL_3:.*]] = arith.constant 1 : index
+// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xi32>
+// CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
+// CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref<i32>
+// CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
+// CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK-OFF: %[[VAL_10:.*]] = scf.for %[[VAL_11:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] iter_args(%[[VAL_12:.*]] = %[[VAL_7]]) -> (i32) {
+// CHECK-OFF: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_11]]] : memref<?xi32>
+// CHECK-OFF: %[[VAL_14:.*]] = arith.xori %[[VAL_12]], %[[VAL_13]] : i32
+// CHECK-OFF: scf.yield %[[VAL_14]] : i32
+// CHECK-OFF: } {"Emitted from" = "linalg.generic"}
+// CHECK-OFF: memref.store %[[VAL_15:.*]], %[[VAL_6]][] : memref<i32>
+// CHECK-OFF: %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_6]] : memref<i32>
+// CHECK-OFF: return %[[VAL_16]] : tensor<i32>
+// CHECK-OFF: }
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+
+#trait = {
+  indexing_maps = [
+    affine_map<(i) -> (i)>, // a (in)
+    affine_map<(i) -> ()>   // x (out)
+  ],
+  iterator_types = ["reduction"]
+}
+
+func.func @sparse_reduction_xor(%argx: tensor<i32>,
+                                %arga: tensor<?xi32, #SparseVector>)
+ -> tensor<i32> {
+  %0 = linalg.generic #trait
+     ins(%arga: tensor<?xi32, #SparseVector>)
+    outs(%argx: tensor<i32>) {
+      ^bb(%a: i32, %x: i32):
+        %t = arith.xori %x, %a: i32
+        linalg.yield %t : i32
+  } -> tensor<i32>
+  return %0 : tensor<i32>
+}
+
+// -----
+// Check that we vectorize and.
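+// Unlike the xor test above, the initial accumulator is materialized with a
+// vector.broadcast rather than by inserting the scalar into lane 0 of a
+// neutral vector; presumably this is fine because `and` is idempotent, so
+// replicating the initial value across all lanes does not change the final
+// horizontal reduction.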
+// CHECK-ON-LABEL: func.func @sparse_reduction_and(
+// CHECK-ON-SAME: %[[VAL_0:.*]]: tensor<i32>,
+// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<i32> {
+// CHECK-ON: %[[VAL_2:.*]] = arith.constant 8 : index
+// CHECK-ON: %[[VAL_3:.*]] = arith.constant dense<0> : vector<8xi32>
+// CHECK-ON: %[[VAL_4:.*]] = arith.constant 0 : index
+// CHECK-ON: %[[VAL_5:.*]] = arith.constant 1 : index
+// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xi32>
+// CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
+// CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref<i32>
+// CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK-ON: %[[VAL_12:.*]] = vector.broadcast %[[VAL_9]] : i32 to vector<8xi32>
+// CHECK-ON: %[[VAL_13:.*]] = scf.for %[[VAL_14:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_2]] iter_args(%[[VAL_15:.*]] = %[[VAL_12]]) -> (vector<8xi32>) {
+// CHECK-ON: %[[VAL_16:.*]] = affine.min #map(%[[VAL_11]], %[[VAL_14]]){{\[}}%[[VAL_2]]]
+// CHECK-ON: %[[VAL_17:.*]] = vector.create_mask %[[VAL_16]] : vector<8xi1>
+// CHECK-ON: %[[VAL_18:.*]] = vector.maskedload %[[VAL_7]]{{\[}}%[[VAL_14]]], %[[VAL_17]], %[[VAL_3]] : memref<?xi32>, vector<8xi1>, vector<8xi32> into vector<8xi32>
+// CHECK-ON: %[[VAL_19:.*]] = arith.andi %[[VAL_15]], %[[VAL_18]] : vector<8xi32>
+// CHECK-ON: %[[VAL_20:.*]] = arith.select %[[VAL_17]], %[[VAL_19]], %[[VAL_15]] : vector<8xi1>, vector<8xi32>
+// CHECK-ON: scf.yield %[[VAL_20]] : vector<8xi32>
+// CHECK-ON: } {"Emitted from" = "linalg.generic"}
+// CHECK-ON: %[[VAL_21:.*]] = vector.reduction <and>, %[[VAL_22:.*]] : vector<8xi32> into i32
+// CHECK-ON: memref.store %[[VAL_21]], %[[VAL_8]][] : memref<i32>
+// CHECK-ON: %[[VAL_23:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<i32>
+// CHECK-ON: return %[[VAL_23]] : tensor<i32>
+// CHECK-ON: }
+//
+// CHECK-OFF-LABEL: func.func @sparse_reduction_and(
+// CHECK-OFF-SAME: %[[VAL_0:.*]]: tensor<i32>,
+// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<i32> {
+// CHECK-OFF: %[[VAL_2:.*]] = arith.constant 0 : index
+// CHECK-OFF: %[[VAL_3:.*]] = arith.constant 1 : index
+// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xi32>
+// CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
+// CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref<i32>
+// CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
+// CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK-OFF: %[[VAL_10:.*]] = scf.for %[[VAL_11:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] iter_args(%[[VAL_12:.*]] = %[[VAL_7]]) -> (i32) {
+// CHECK-OFF: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_11]]] : memref<?xi32>
+// CHECK-OFF: %[[VAL_14:.*]] = arith.andi %[[VAL_12]], %[[VAL_13]] : i32
+// CHECK-OFF: scf.yield %[[VAL_14]] : i32
+// CHECK-OFF: } {"Emitted from" = "linalg.generic"}
+// CHECK-OFF: memref.store %[[VAL_15:.*]], %[[VAL_6]][] : memref<i32>
+// CHECK-OFF: %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_6]] : memref<i32>
+// CHECK-OFF: return %[[VAL_16]] : tensor<i32>
+// CHECK-OFF: }
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+
+#trait = {
+  indexing_maps = [
+    affine_map<(i) -> (i)>, // a (in)
+    affine_map<(i) -> ()>   // x (out)
+  ],
+  iterator_types = ["reduction"]
+}
+
+func.func @sparse_reduction_and(%argx: tensor<i32>,
+                                %arga: tensor<?xi32, #SparseVector>)
+ -> tensor<i32> {
+  %0 = linalg.generic #trait
+     ins(%arga: tensor<?xi32, #SparseVector>)
+    outs(%argx: tensor<i32>) {
+      ^bb(%a: i32, %x: i32):
+        %t = arith.andi %x, %a: i32
+        linalg.yield %t : i32
+  } -> tensor<i32>
+  return %0 : tensor<i32>
+}
+
+// -----
+// Check that we vectorize muli.
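+// The accumulator starts from dense<1>, the multiplicative identity, with
+// the initial scalar inserted into lane 0. The dense<0> constant is only the
+// pass-through value of the masked load; the arith.select keeps the previous
+// accumulator in the masked-off lanes regardless.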
+// CHECK-ON-LABEL: func.func @sparse_reduction_muli(
+// CHECK-ON-SAME: %[[VAL_0:.*]]: tensor<i32>,
+// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<i32> {
+// CHECK-ON: %[[VAL_2:.*]] = arith.constant 8 : index
+// CHECK-ON: %[[VAL_3:.*]] = arith.constant dense<1> : vector<8xi32>
+// CHECK-ON: %[[VAL_4:.*]] = arith.constant 0 : index
+// CHECK-ON: %[[VAL_5:.*]] = arith.constant dense<0> : vector<8xi32>
+// CHECK-ON: %[[VAL_6:.*]] = arith.constant 1 : index
+// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-ON: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xi32>
+// CHECK-ON: %[[VAL_9:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
+// CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_9]][] : memref<i32>
+// CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK-ON: %[[VAL_12:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_6]]] : memref<?xindex>
+// CHECK-ON: %[[VAL_13:.*]] = vector.insertelement %[[VAL_10]], %[[VAL_3]]{{\[}}%[[VAL_4]] : index] : vector<8xi32>
+// CHECK-ON: %[[VAL_14:.*]] = scf.for %[[VAL_15:.*]] = %[[VAL_11]] to %[[VAL_12]] step %[[VAL_2]] iter_args(%[[VAL_16:.*]] = %[[VAL_13]]) -> (vector<8xi32>) {
+// CHECK-ON: %[[VAL_17:.*]] = affine.min #map(%[[VAL_12]], %[[VAL_15]]){{\[}}%[[VAL_2]]]
+// CHECK-ON: %[[VAL_18:.*]] = vector.create_mask %[[VAL_17]] : vector<8xi1>
+// CHECK-ON: %[[VAL_19:.*]] = vector.maskedload %[[VAL_8]]{{\[}}%[[VAL_15]]], %[[VAL_18]], %[[VAL_5]] : memref<?xi32>, vector<8xi1>, vector<8xi32> into vector<8xi32>
+// CHECK-ON: %[[VAL_20:.*]] = arith.muli %[[VAL_16]], %[[VAL_19]] : vector<8xi32>
+// CHECK-ON: %[[VAL_21:.*]] = arith.select %[[VAL_18]], %[[VAL_20]], %[[VAL_16]] : vector<8xi1>, vector<8xi32>
+// CHECK-ON: scf.yield %[[VAL_21]] : vector<8xi32>
+// CHECK-ON: } {"Emitted from" = "linalg.generic"}
+// CHECK-ON: %[[VAL_22:.*]] = vector.reduction <mul>, %[[VAL_23:.*]] : vector<8xi32> into i32
+// CHECK-ON: memref.store %[[VAL_22]], %[[VAL_9]][] : memref<i32>
+// CHECK-ON: %[[VAL_24:.*]] = bufferization.to_tensor %[[VAL_9]] : memref<i32>
+// CHECK-ON: return %[[VAL_24]] : tensor<i32>
+// CHECK-ON: }
+//
+// CHECK-OFF-LABEL: func.func @sparse_reduction_muli(
+// CHECK-OFF-SAME: %[[VAL_0:.*]]: tensor<i32>,
+// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<i32> {
+// CHECK-OFF: %[[VAL_2:.*]] = arith.constant 0 : index
+// CHECK-OFF: %[[VAL_3:.*]] = arith.constant 1 : index
+// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xi32>
+// CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
+// CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref<i32>
+// CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
+// CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK-OFF: %[[VAL_10:.*]] = scf.for %[[VAL_11:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] iter_args(%[[VAL_12:.*]] = %[[VAL_7]]) -> (i32) {
+// CHECK-OFF: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_11]]] : memref<?xi32>
+// CHECK-OFF: %[[VAL_14:.*]] = arith.muli %[[VAL_12]], %[[VAL_13]] : i32
+// CHECK-OFF: scf.yield %[[VAL_14]] : i32
+// CHECK-OFF: } {"Emitted from" = "linalg.generic"}
+// CHECK-OFF: memref.store %[[VAL_15:.*]], %[[VAL_6]][] : memref<i32>
+// CHECK-OFF: %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_6]] : memref<i32>
+// CHECK-OFF: return %[[VAL_16]] : tensor<i32>
+// CHECK-OFF: }
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+
+#trait = {
+  indexing_maps = [
+    affine_map<(i) -> (i)>, // a (in)
+    affine_map<(i) -> ()>   // x (out)
+  ],
+  iterator_types = ["reduction"]
+}
+
+func.func @sparse_reduction_muli(%argx: tensor<i32>,
+                                 %arga: tensor<?xi32, #SparseVector>)
+ -> tensor<i32> {
+  %0 = linalg.generic #trait
+     ins(%arga: tensor<?xi32, #SparseVector>)
+    outs(%argx: tensor<i32>) {
+      ^bb(%a: i32, %x: i32):
+        %t = arith.muli %x, %a: i32
+        linalg.yield %t : i32
+  } -> tensor<i32>
+  return %0 : tensor<i32>
+}
+
+// -----
+// Check that we vectorize addi.
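+// Same shape as the xor test: 0 is the identity of addition, so the initial
+// scalar is inserted into lane 0 of an all-zero vector and the lanes are
+// combined with a final horizontal <add> reduction.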
+// CHECK-ON-LABEL: func.func @sparse_reduction_addi(
+// CHECK-ON-SAME: %[[VAL_0:.*]]: tensor<i32>,
+// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<i32> {
+// CHECK-ON: %[[VAL_2:.*]] = arith.constant 8 : index
+// CHECK-ON: %[[VAL_3:.*]] = arith.constant dense<0> : vector<8xi32>
+// CHECK-ON: %[[VAL_4:.*]] = arith.constant 0 : index
+// CHECK-ON: %[[VAL_5:.*]] = arith.constant 1 : index
+// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xi32>
+// CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
+// CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref<i32>
+// CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK-ON: %[[VAL_12:.*]] = vector.insertelement %[[VAL_9]], %[[VAL_3]]{{\[}}%[[VAL_4]] : index] : vector<8xi32>
+// CHECK-ON: %[[VAL_13:.*]] = scf.for %[[VAL_14:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_2]] iter_args(%[[VAL_15:.*]] = %[[VAL_12]]) -> (vector<8xi32>) {
+// CHECK-ON: %[[VAL_16:.*]] = affine.min #map(%[[VAL_11]], %[[VAL_14]]){{\[}}%[[VAL_2]]]
+// CHECK-ON: %[[VAL_17:.*]] = vector.create_mask %[[VAL_16]] : vector<8xi1>
+// CHECK-ON: %[[VAL_18:.*]] = vector.maskedload %[[VAL_7]]{{\[}}%[[VAL_14]]], %[[VAL_17]], %[[VAL_3]] : memref<?xi32>, vector<8xi1>, vector<8xi32> into vector<8xi32>
+// CHECK-ON: %[[VAL_19:.*]] = arith.addi %[[VAL_15]], %[[VAL_18]] : vector<8xi32>
+// CHECK-ON: %[[VAL_20:.*]] = arith.select %[[VAL_17]], %[[VAL_19]], %[[VAL_15]] : vector<8xi1>, vector<8xi32>
+// CHECK-ON: scf.yield %[[VAL_20]] : vector<8xi32>
+// CHECK-ON: } {"Emitted from" = "linalg.generic"}
+// CHECK-ON: %[[VAL_21:.*]] = vector.reduction <add>, %[[VAL_22:.*]] : vector<8xi32> into i32
+// CHECK-ON: memref.store %[[VAL_21]], %[[VAL_8]][] : memref<i32>
+// CHECK-ON: %[[VAL_23:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<i32>
+// CHECK-ON: return %[[VAL_23]] : tensor<i32>
+// CHECK-ON: }
+//
+// CHECK-OFF-LABEL: func.func @sparse_reduction_addi(
+// CHECK-OFF-SAME: %[[VAL_0:.*]]: tensor<i32>,
+// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<i32> {
+// CHECK-OFF: %[[VAL_2:.*]] = arith.constant 0 : index
+// CHECK-OFF: %[[VAL_3:.*]] = arith.constant 1 : index
+// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xi32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xi32>
+// CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<i32>
+// CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref<i32>
+// CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
+// CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK-OFF: %[[VAL_10:.*]] = scf.for %[[VAL_11:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] iter_args(%[[VAL_12:.*]] = %[[VAL_7]]) -> (i32) {
+// CHECK-OFF: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_11]]] : memref<?xi32>
+// CHECK-OFF: %[[VAL_14:.*]] = arith.addi %[[VAL_12]], %[[VAL_13]] : i32
+// CHECK-OFF: scf.yield %[[VAL_14]] : i32
+// CHECK-OFF: } {"Emitted from" = "linalg.generic"}
+// CHECK-OFF: memref.store %[[VAL_15:.*]], %[[VAL_6]][] : memref<i32>
+// CHECK-OFF: %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_6]] : memref<i32>
+// CHECK-OFF: return %[[VAL_16]] : tensor<i32>
+// CHECK-OFF: }
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+
+#trait = {
+  indexing_maps = [
+    affine_map<(i) -> (i)>, // a (in)
+    affine_map<(i) -> ()>   // x (out)
+  ],
+  iterator_types = ["reduction"]
+}
+
+func.func @sparse_reduction_addi(%argx: tensor<i32>,
+                                 %arga: tensor<?xi32, #SparseVector>)
+ -> tensor<i32> {
+  %0 = linalg.generic #trait
+     ins(%arga: tensor<?xi32, #SparseVector>)
+    outs(%argx: tensor<i32>) {
+      ^bb(%a: i32, %x: i32):
+        %t = arith.addi %x, %a: i32
+        linalg.yield %t : i32
+  } -> tensor<i32>
+  return %0 : tensor<i32>
+}
+
+// -----
+// Check that we vectorize subf.
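+// Each lane accumulates a running difference over its slice of the values;
+// only lane 0 starts from the initial value, so the partial results can
+// still be combined with a final horizontal <add> reduction.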
+// CHECK-ON-LABEL: func.func @sparse_reduction_subf(
+// CHECK-ON-SAME: %[[VAL_0:.*]]: tensor<f32>,
+// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<f32> {
+// CHECK-ON: %[[VAL_2:.*]] = arith.constant 8 : index
+// CHECK-ON: %[[VAL_3:.*]] = arith.constant dense<0.000000e+00> : vector<8xf32>
+// CHECK-ON: %[[VAL_4:.*]] = arith.constant 0 : index
+// CHECK-ON: %[[VAL_5:.*]] = arith.constant 1 : index
+// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xf32>
+// CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<f32>
+// CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref<f32>
+// CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK-ON: %[[VAL_12:.*]] = vector.insertelement %[[VAL_9]], %[[VAL_3]]{{\[}}%[[VAL_4]] : index] : vector<8xf32>
+// CHECK-ON: %[[VAL_13:.*]] = scf.for %[[VAL_14:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_2]] iter_args(%[[VAL_15:.*]] = %[[VAL_12]]) -> (vector<8xf32>) {
+// CHECK-ON: %[[VAL_16:.*]] = affine.min #map(%[[VAL_11]], %[[VAL_14]]){{\[}}%[[VAL_2]]]
+// CHECK-ON: %[[VAL_17:.*]] = vector.create_mask %[[VAL_16]] : vector<8xi1>
+// CHECK-ON: %[[VAL_18:.*]] = vector.maskedload %[[VAL_7]]{{\[}}%[[VAL_14]]], %[[VAL_17]], %[[VAL_3]] : memref<?xf32>, vector<8xi1>, vector<8xf32> into vector<8xf32>
+// CHECK-ON: %[[VAL_19:.*]] = arith.subf %[[VAL_15]], %[[VAL_18]] : vector<8xf32>
+// CHECK-ON: %[[VAL_20:.*]] = arith.select %[[VAL_17]], %[[VAL_19]], %[[VAL_15]] : vector<8xi1>, vector<8xf32>
+// CHECK-ON: scf.yield %[[VAL_20]] : vector<8xf32>
+// CHECK-ON: } {"Emitted from" = "linalg.generic"}
+// CHECK-ON: %[[VAL_21:.*]] = vector.reduction <add>, %[[VAL_22:.*]] : vector<8xf32> into f32
+// CHECK-ON: memref.store %[[VAL_21]], %[[VAL_8]][] : memref<f32>
+// CHECK-ON: %[[VAL_23:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<f32>
+// CHECK-ON: return %[[VAL_23]] : tensor<f32>
+// CHECK-ON: }
+//
+// CHECK-OFF-LABEL: func.func @sparse_reduction_subf(
+// CHECK-OFF-SAME: %[[VAL_0:.*]]: tensor<f32>,
+// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<f32> {
+// CHECK-OFF: %[[VAL_2:.*]] = arith.constant 0 : index
+// CHECK-OFF: %[[VAL_3:.*]] = arith.constant 1 : index
+// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xf32>
+// CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<f32>
+// CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref<f32>
+// CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
+// CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK-OFF: %[[VAL_10:.*]] = scf.for %[[VAL_11:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] iter_args(%[[VAL_12:.*]] = %[[VAL_7]]) -> (f32) {
+// CHECK-OFF: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_11]]] : memref<?xf32>
+// CHECK-OFF: %[[VAL_14:.*]] = arith.subf %[[VAL_12]], %[[VAL_13]] : f32
+// CHECK-OFF: scf.yield %[[VAL_14]] : f32
+// CHECK-OFF: } {"Emitted from" = "linalg.generic"}
+// CHECK-OFF: memref.store %[[VAL_15:.*]], %[[VAL_6]][] : memref<f32>
+// CHECK-OFF: %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_6]] : memref<f32>
+// CHECK-OFF: return %[[VAL_16]] : tensor<f32>
+// CHECK-OFF: }
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+
+#trait = {
+  indexing_maps = [
+    affine_map<(i) -> (i)>, // a (in)
+    affine_map<(i) -> ()>   // x (out)
+  ],
+  iterator_types = ["reduction"]
+}
+
+func.func @sparse_reduction_subf(%argx: tensor<f32>,
+                                 %arga: tensor<?xf32, #SparseVector>)
+ -> tensor<f32> {
+  %0 = linalg.generic #trait
+     ins(%arga: tensor<?xf32, #SparseVector>)
+    outs(%argx: tensor<f32>) {
+      ^bb(%a: f32, %x: f32):
+        %t = arith.subf %x, %a: f32
+        linalg.yield %t : f32
+  } -> tensor<f32>
+  return %0 : tensor<f32>
+}
+
+// -----
+// Check that we vectorize addf.
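+// Same pattern as the addi test above, only with f32 elements.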
+// CHECK-ON-LABEL: func.func @sparse_reduction_addf(
+// CHECK-ON-SAME: %[[VAL_0:.*]]: tensor<f32>,
+// CHECK-ON-SAME: %[[VAL_1:.*]]: tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<f32> {
+// CHECK-ON: %[[VAL_2:.*]] = arith.constant 8 : index
+// CHECK-ON: %[[VAL_3:.*]] = arith.constant dense<0.000000e+00> : vector<8xf32>
+// CHECK-ON: %[[VAL_4:.*]] = arith.constant 0 : index
+// CHECK-ON: %[[VAL_5:.*]] = arith.constant 1 : index
+// CHECK-ON: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-ON: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xf32>
+// CHECK-ON: %[[VAL_8:.*]] = bufferization.to_memref %[[VAL_0]] : memref<f32>
+// CHECK-ON: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref<f32>
+// CHECK-ON: %[[VAL_10:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_4]]] : memref<?xindex>
+// CHECK-ON: %[[VAL_11:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_5]]] : memref<?xindex>
+// CHECK-ON: %[[VAL_12:.*]] = vector.insertelement %[[VAL_9]], %[[VAL_3]]{{\[}}%[[VAL_4]] : index] : vector<8xf32>
+// CHECK-ON: %[[VAL_13:.*]] = scf.for %[[VAL_14:.*]] = %[[VAL_10]] to %[[VAL_11]] step %[[VAL_2]] iter_args(%[[VAL_15:.*]] = %[[VAL_12]]) -> (vector<8xf32>) {
+// CHECK-ON: %[[VAL_16:.*]] = affine.min #map(%[[VAL_11]], %[[VAL_14]]){{\[}}%[[VAL_2]]]
+// CHECK-ON: %[[VAL_17:.*]] = vector.create_mask %[[VAL_16]] : vector<8xi1>
+// CHECK-ON: %[[VAL_18:.*]] = vector.maskedload %[[VAL_7]]{{\[}}%[[VAL_14]]], %[[VAL_17]], %[[VAL_3]] : memref<?xf32>, vector<8xi1>, vector<8xf32> into vector<8xf32>
+// CHECK-ON: %[[VAL_19:.*]] = arith.addf %[[VAL_15]], %[[VAL_18]] : vector<8xf32>
+// CHECK-ON: %[[VAL_20:.*]] = arith.select %[[VAL_17]], %[[VAL_19]], %[[VAL_15]] : vector<8xi1>, vector<8xf32>
+// CHECK-ON: scf.yield %[[VAL_20]] : vector<8xf32>
+// CHECK-ON: } {"Emitted from" = "linalg.generic"}
+// CHECK-ON: %[[VAL_21:.*]] = vector.reduction <add>, %[[VAL_22:.*]] : vector<8xf32> into f32
+// CHECK-ON: memref.store %[[VAL_21]], %[[VAL_8]][] : memref<f32>
+// CHECK-ON: %[[VAL_23:.*]] = bufferization.to_tensor %[[VAL_8]] : memref<f32>
+// CHECK-ON: return %[[VAL_23]] : tensor<f32>
+// CHECK-ON: }
+//
+// CHECK-OFF-LABEL: func.func @sparse_reduction_addf(
+// CHECK-OFF-SAME: %[[VAL_0:.*]]: tensor<f32>,
+// CHECK-OFF-SAME: %[[VAL_1:.*]]: tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>>) -> tensor<f32> {
+// CHECK-OFF: %[[VAL_2:.*]] = arith.constant 0 : index
+// CHECK-OFF: %[[VAL_3:.*]] = arith.constant 1 : index
+// CHECK-OFF: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_1]] {dimension = 0 : index} : tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xindex>
+// CHECK-OFF: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>> to memref<?xf32>
+// CHECK-OFF: %[[VAL_6:.*]] = bufferization.to_memref %[[VAL_0]] : memref<f32>
+// CHECK-OFF: %[[VAL_7:.*]] = memref.load %[[VAL_6]][] : memref<f32>
+// CHECK-OFF: %[[VAL_8:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_2]]] : memref<?xindex>
+// CHECK-OFF: %[[VAL_9:.*]] = memref.load %[[VAL_4]]{{\[}}%[[VAL_3]]] : memref<?xindex>
+// CHECK-OFF: %[[VAL_10:.*]] = scf.for %[[VAL_11:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] iter_args(%[[VAL_12:.*]] = %[[VAL_7]]) -> (f32) {
+// CHECK-OFF: %[[VAL_13:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_11]]] : memref<?xf32>
+// CHECK-OFF: %[[VAL_14:.*]] = arith.addf %[[VAL_12]], %[[VAL_13]] : f32
+// CHECK-OFF: scf.yield %[[VAL_14]] : f32
+// CHECK-OFF: } {"Emitted from" = "linalg.generic"}
+// CHECK-OFF: memref.store %[[VAL_15:.*]], %[[VAL_6]][] : memref<f32>
+// CHECK-OFF: %[[VAL_16:.*]] = bufferization.to_tensor %[[VAL_6]] : memref<f32>
+// CHECK-OFF: return %[[VAL_16]] : tensor<f32>
+// CHECK-OFF: }
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+
+#trait = {
+  indexing_maps = [
+    affine_map<(i) -> (i)>, // a (in)
+    affine_map<(i) -> ()>   // x (out)
+  ],
+  iterator_types = ["reduction"]
+}
+
+func.func @sparse_reduction_addf(%argx: tensor<f32>,
+                                 %arga: tensor<?xf32, #SparseVector>)
+ -> tensor<f32> {
+  %0 = linalg.generic #trait
+     ins(%arga: tensor<?xf32, #SparseVector>)
+    outs(%argx: tensor<f32>) {
+      ^bb(%a: f32, %x: f32):
+        %t = arith.addf %x, %a: f32
+        linalg.yield %t : f32
+  } -> tensor<f32>
+  return %0 : tensor<f32>
+}