diff --git a/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td b/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
--- a/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
+++ b/mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
@@ -435,7 +435,7 @@
     arguments.
   }];
 
-  let arguments = (outs Shape_SizeOrIndexType:$arg);
+  let arguments = (ins Shape_SizeOrIndexType:$arg);
   let results = (outs Index:$result);
 
   let assemblyFormat = "$arg attr-dict `:` type($arg)";
diff --git a/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp b/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
--- a/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
+++ b/mlir/lib/Conversion/ShapeToStandard/ShapeToStandard.cpp
@@ -56,6 +56,21 @@
 };
 } // namespace
 
+namespace {
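+// Lowers `shape.const_size` to a `std.constant` of index type.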
+class ConstSizeOpConversion : public OpConversionPattern<ConstSizeOp> {
+public:
+  using OpConversionPattern<ConstSizeOp>::OpConversionPattern;
+
+  LogicalResult
+  matchAndRewrite(ConstSizeOp op, ArrayRef<Value> operands,
+                  ConversionPatternRewriter &rewriter) const override {
+    rewriter.replaceOpWithNewOp<ConstantIndexOp>(op, op.value().getSExtValue());
+    return success();
+  }
+};
+} // namespace
+
 namespace {
 class ShapeOfOpConversion : public OpConversionPattern<ShapeOfOp> {
 public:
@@ -103,6 +118,27 @@
   return success();
 }
 
+namespace {
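+// Lowers `shape.to_extent_tensor` to `std.tensor_cast` on ranked tensors.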
+class ToExtentTensorOpConversion
+    : public OpConversionPattern<ToExtentTensorOp> {
+public:
+  using OpConversionPattern<ToExtentTensorOp>::OpConversionPattern;
+
+  LogicalResult
+  matchAndRewrite(ToExtentTensorOp op, ArrayRef<Value> operands,
+                  ConversionPatternRewriter &rewriter) const override {
+    ToExtentTensorOpAdaptor adaptor(operands);
+    if (!adaptor.input().getType().isa<RankedTensorType>())
+      return rewriter.notifyMatchFailure(op, "input needs to be a tensor");
+
+    rewriter.replaceOpWithNewOp<TensorCastOp>(op, adaptor.input(),
+                                              op.getType());
+    return success();
+  }
+};
+} // namespace
+
 namespace {
 class GetExtentOpConverter : public OpConversionPattern<GetExtentOp> {
   using OpConversionPattern<GetExtentOp>::OpConversionPattern;
@@ -210,9 +246,11 @@
       AnyOpConversion,
       BinaryOpConversion<AddOp, AddIOp>,
       BinaryOpConversion<MulOp, MulIOp>,
+      ConstSizeOpConversion,
       GetExtentOpConverter,
       RankOpConverter,
-      ShapeOfOpConversion>(ctx);
+      ShapeOfOpConversion,
+      ToExtentTensorOpConversion>(ctx);
   // clang-format on
 }
 
diff --git a/mlir/lib/Dialect/Shape/IR/Shape.cpp b/mlir/lib/Dialect/Shape/IR/Shape.cpp
--- a/mlir/lib/Dialect/Shape/IR/Shape.cpp
+++ b/mlir/lib/Dialect/Shape/IR/Shape.cpp
@@ -753,7 +753,8 @@
   // `IntegerAttr`s which makes constant folding simple.
   if (Attribute arg = operands[0])
     return arg;
-  return {};
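+  // Otherwise, fold the cast away when the argument is already an index.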
+  return impl::foldCastOp(*this);
 }
 
 void SizeToIndexOp::getCanonicalizationPatterns(
@@ -812,7 +813,8 @@
 
 OpFoldResult ToExtentTensorOp::fold(ArrayRef<Attribute> operands) {
   if (!operands[0])
-    return nullptr;
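+    // Without a constant shape, fold only if the cast is a no-op.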
+    return impl::foldCastOp(*this);
   Builder builder(getContext());
   auto shape = llvm::to_vector<6>(
       operands[0].cast<DenseIntElementsAttr>().getValues<int64_t>());
diff --git a/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir b/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir
--- a/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir
+++ b/mlir/test/Conversion/ShapeToStandard/shape-to-standard.mlir
@@ -133,3 +133,27 @@
   return %result : tensor<?xindex>
 }
 
+// -----
+
+// Lower `const_size` to `std.constant`.
+// CHECK-LABEL: @const_size
+func @const_size() -> index {
+  // CHECK: %[[RES:.*]] = constant 42 : index
+  %size = shape.const_size 42
+  %result = shape.size_to_index %size : !shape.size
+  // CHECK: return %[[RES]]
+  return %result : index
+}
+
+// -----
+
+// Lower `to_extent_tensor` to `std.tensor_cast`.
+// CHECK-LABEL: @to_extent_tensor
+// CHECK-SAME: (%[[ARG:.*]]: tensor<?xindex>
+func @to_extent_tensor(%arg: tensor<?xindex>) -> tensor<3xindex> {
+  // CHECK-NOT: to_extent_tensor
+  // CHECK: %[[RES:.*]] = tensor_cast %[[ARG]] : tensor<?xindex> to tensor<3xindex>
+  %casted = shape.to_extent_tensor %arg : tensor<?xindex> -> tensor<3xindex>
+  // CHECK: return %[[RES]]
+  return %casted : tensor<3xindex>
+}
diff --git a/mlir/test/Dialect/Shape/canonicalize.mlir b/mlir/test/Dialect/Shape/canonicalize.mlir
--- a/mlir/test/Dialect/Shape/canonicalize.mlir
+++ b/mlir/test/Dialect/Shape/canonicalize.mlir
@@ -774,3 +774,22 @@
   return %result : !shape.size
 }
 
+// -----
+
+// Fold size_to_index when the argument is already an index.
+// CHECK-LABEL: @fold_size_to_index_on_index
+func @fold_size_to_index_on_index(%arg: index) -> index {
+  // CHECK-NOT: size_to_index
+  %casted = shape.size_to_index %arg : index
+  return %casted : index
+}
+
+// -----
+
+// Fold to_extent_tensor when already on tensor.
+// CHECK-LABEL: @fold_to_extent_tensor_on_tensor
+func @fold_to_extent_tensor_on_tensor(%arg: tensor<?xindex>) -> tensor<?xindex> {
+  // CHECK-NOT: to_extent_tensor
+  %casted = shape.to_extent_tensor %arg : tensor<?xindex> -> tensor<?xindex>
+  return %casted : tensor<?xindex>
+}