Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -9883,6 +9883,13 @@
 
     return SDValue();
   }
+  case RISCVISD::VSLIDEDOWN_VL:
+  case RISCVISD::VSLIDEUP_VL:
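+    // With an undef passthru and a zero offset, the slide is a no-op: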
+    //   vslidedown.vi undef, src, 0 -> src
+    //   vslideup.vi   undef, src, 0 -> src
+    if (N->getOperand(0).isUndef() && isNullConstant(N->getOperand(2)))
+      return N->getOperand(1);
   }
 
   return SDValue();
Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
+++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
@@ -510,9 +510,7 @@
 ; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
 ; RV32-NEXT:    vmv.v.i v8, 0
 ; RV32-NEXT:    vslide1up.vx v9, v8, a1
-; RV32-NEXT:    vslide1up.vx v10, v9, a0
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vslideup.vi v8, v10, 0
+; RV32-NEXT:    vslide1up.vx v8, v9, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bitcast_i64_v4i16:
@@ -549,9 +547,7 @@
 ; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
 ; RV32-NEXT:    vmv.v.i v8, 0
 ; RV32-NEXT:    vslide1up.vx v9, v8, a1
-; RV32-NEXT:    vslide1up.vx v10, v9, a0
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vslideup.vi v8, v10, 0
+; RV32-NEXT:    vslide1up.vx v8, v9, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bitcast_i64_v2i32:
@@ -588,9 +584,7 @@
 ; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
 ; RV32-NEXT:    vmv.v.i v8, 0
 ; RV32-NEXT:    vslide1up.vx v9, v8, a1
-; RV32-NEXT:    vslide1up.vx v10, v9, a0
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vslideup.vi v8, v10, 0
+; RV32-NEXT:    vslide1up.vx v8, v9, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: bitcast_i64_v1i64:
Index: llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll
+++ llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll
@@ -201,9 +201,7 @@
 ; RV32-FP-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
 ; RV32-FP-NEXT:    vmv.v.i v8, 0
 ; RV32-FP-NEXT:    vslide1up.vx v9, v8, a1
-; RV32-FP-NEXT:    vslide1up.vx v10, v9, a0
-; RV32-FP-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-FP-NEXT:    vslideup.vi v8, v10, 0
+; RV32-FP-NEXT:    vslide1up.vx v8, v9, a0
 ; RV32-FP-NEXT:    ret
 ;
 ; RV64-FP-LABEL: bitcast_i64_v4f16:
@@ -221,9 +219,7 @@
 ; RV32-FP-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
 ; RV32-FP-NEXT:    vmv.v.i v8, 0
 ; RV32-FP-NEXT:    vslide1up.vx v9, v8, a1
-; RV32-FP-NEXT:    vslide1up.vx v10, v9, a0
-; RV32-FP-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-FP-NEXT:    vslideup.vi v8, v10, 0
+; RV32-FP-NEXT:    vslide1up.vx v8, v9, a0
 ; RV32-FP-NEXT:    ret
 ;
 ; RV64-FP-LABEL: bitcast_i64_v2f32:
@@ -241,9 +237,7 @@
 ; RV32-FP-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
 ; RV32-FP-NEXT:    vmv.v.i v8, 0
 ; RV32-FP-NEXT:    vslide1up.vx v9, v8, a1
-; RV32-FP-NEXT:    vslide1up.vx v10, v9, a0
-; RV32-FP-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-FP-NEXT:    vslideup.vi v8, v10, 0
+; RV32-FP-NEXT:    vslide1up.vx v8, v9, a0
 ; RV32-FP-NEXT:    ret
 ;
 ; RV64-FP-LABEL: bitcast_i64_v1f64: