diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -225,9 +225,9 @@
   for (unsigned i = 0; i < rank; i++) {
     if (shape[i] == ShapedType::kDynamicSize)
       continue;
-    auto constantOp = op.sizes()[i].getDefiningOp<ConstantOp>();
+    auto constantOp = op.sizes()[i].getDefiningOp<arith::ConstantOp>();
     if (!constantOp ||
-        constantOp.getValue().cast<IntegerAttr>().getInt() != shape[i])
+        constantOp.value().cast<IntegerAttr>().getInt() != shape[i])
       return op.emitError("unexpected mismatch with static dimension size ")
              << shape[i];
   }
diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir
--- a/mlir/test/Dialect/SparseTensor/invalid.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid.mlir
@@ -37,8 +37,8 @@
 #SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>

 func @invalid_init_size() -> tensor<?x10xf32, #SparseMatrix> {
-  %c10 = constant 10 : index
-  %c20 = constant 20 : index
+  %c10 = arith.constant 10 : index
+  %c20 = arith.constant 20 : index
   // expected-error@+1 {{unexpected mismatch with static dimension size 10}}
   %0 = sparse_tensor.init [%c10, %c20] : tensor<?x10xf32, #SparseMatrix>
   return %0 : tensor<?x10xf32, #SparseMatrix>
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
--- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
@@ -16,13 +16,13 @@
 #SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>

 // CHECK-LABEL: func @sparse_init()
-// CHECK-DAG: %[[C16:.*]] = constant 16 : index
-// CHECK-DAG: %[[C32:.*]] = constant 32 : index
+// CHECK-DAG: %[[C16:.*]] = arith.constant 16 : index
+// CHECK-DAG: %[[C32:.*]] = arith.constant 32 : index
 // CHECK: %[[T:.*]] = sparse_tensor.init[%[[C16]], %[[C32]]] : tensor<16x32xf64, #SparseMatrix>
 // CHECK: return %[[T]] : tensor<16x32xf64, #SparseMatrix>
 func @sparse_init() -> tensor<16x32xf64, #SparseMatrix> {
-  %d1 = constant 16 : index
-  %d2 = constant 32 : index
+  %d1 = arith.constant 16 : index
+  %d2 = arith.constant 32 : index
   %0 = sparse_tensor.init [%d1, %d2] : tensor<16x32xf64, #SparseMatrix>
   return %0 : tensor<16x32xf64, #SparseMatrix>
 }