diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2687,6 +2687,30 @@
                      DAG.getTargetConstant(FRM, DL, Subtarget.getXLenVT()));
 }
 
+static SDValue
+getVSlidedown(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, SDLoc DL,
+              EVT VT, SDValue Merge, SDValue Op, SDValue Offset, SDValue Mask,
+              SDValue VL,
+              unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
+  if (Merge.isUndef())
+    Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+  SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
+  SDValue Ops[] = {Merge, Op, Offset, Mask, VL, PolicyOp};
+  return DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VT, Ops);
+}
+
+static SDValue
+getVSlideup(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, SDLoc DL,
+            EVT VT, SDValue Merge, SDValue Op, SDValue Offset, SDValue Mask,
+            SDValue VL,
+            unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
+  if (Merge.isUndef())
+    Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+  SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
+  SDValue Ops[] = {Merge, Op, Offset, Mask, VL, PolicyOp};
+  return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VT, Ops);
+}
+
 struct VIDSequence {
   int64_t StepNumerator;
   unsigned StepDenominator;
@@ -3202,10 +3226,29 @@
       // TODO: Use vfslide1down.
       return SDValue();
 
+    const unsigned Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+
     SDValue Vec = DAG.getUNDEF(ContainerVT);
-    for (const SDValue &V : Op->ops())
+    unsigned UndefCount = 0;
+    for (const SDValue &V : Op->ops()) {
+      if (V.isUndef()) {
+        UndefCount++;
+        continue;
+      }
+      if (UndefCount) {
+        const SDValue Offset = DAG.getConstant(UndefCount, DL, Subtarget.getXLenVT());
+        Vec = getVSlidedown(DAG, Subtarget, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
+                            Vec, Offset, Mask, VL, Policy);
+        UndefCount = 0;
+      }
       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, ContainerVT,
                         DAG.getUNDEF(ContainerVT), Vec, V, Mask, VL);
+    }
+    if (UndefCount) {
+      const SDValue Offset = DAG.getConstant(UndefCount, DL, Subtarget.getXLenVT());
+      Vec = getVSlidedown(DAG, Subtarget, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
+                          Vec, Offset, Mask, VL, Policy);
+    }
     return convertFromScalableVector(VT, Vec, DAG, Subtarget);
   }
 
@@ -3575,30 +3618,6 @@
   return Res;
 }
 
-static SDValue
-getVSlidedown(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, SDLoc DL,
-              EVT VT, SDValue Merge, SDValue Op, SDValue Offset, SDValue Mask,
-              SDValue VL,
-              unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
-  if (Merge.isUndef())
-    Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
-  SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
-  SDValue Ops[] = {Merge, Op, Offset, Mask, VL, PolicyOp};
-  return DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VT, Ops);
-}
-
-static SDValue
-getVSlideup(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, SDLoc DL,
-            EVT VT, SDValue Merge, SDValue Op, SDValue Offset, SDValue Mask,
-            SDValue VL,
-            unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
-  if (Merge.isUndef())
-    Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
-  SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
-  SDValue Ops[] = {Merge, Op, Offset, Mask, VL, PolicyOp};
-  return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VT, Ops);
-}
-
 // Lower the following shuffle to vslidedown.
 // a)
 // t49: v8i8 = extract_subvector t13, Constant:i64<0>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
@@ -223,7 +223,7 @@
 ; LMULMAX8RV32-NEXT:    vslide1down.vx v8, v8, a1
 ; LMULMAX8RV32-NEXT:    vslide1down.vx v8, v8, a2
 ; LMULMAX8RV32-NEXT:    vslide1down.vx v8, v8, a0
-; LMULMAX8RV32-NEXT:    vslide1down.vx v8, v8, a0
+; LMULMAX8RV32-NEXT:    vslidedown.vi v8, v8, 1
 ; LMULMAX8RV32-NEXT:    vadd.vv v8, v8, v8
 ; LMULMAX8RV32-NEXT:    vsra.vi v8, v8, 1
 ; LMULMAX8RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
@@ -240,7 +240,7 @@
 ; LMULMAX8RV64-NEXT:    vslide1down.vx v8, v8, a1
 ; LMULMAX8RV64-NEXT:    vslide1down.vx v8, v8, a2
 ; LMULMAX8RV64-NEXT:    vslide1down.vx v8, v8, a0
-; LMULMAX8RV64-NEXT:    vslide1down.vx v8, v8, a0
+; LMULMAX8RV64-NEXT:    vslidedown.vi v8, v8, 1
 ; LMULMAX8RV64-NEXT:    vadd.vv v8, v8, v8
 ; LMULMAX8RV64-NEXT:    vsra.vi v8, v8, 1
 ; LMULMAX8RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
@@ -257,7 +257,7 @@
 ; LMULMAX1RV32-NEXT:    vslide1down.vx v8, v8, a1
 ; LMULMAX1RV32-NEXT:    vslide1down.vx v8, v8, a2
 ; LMULMAX1RV32-NEXT:    vslide1down.vx v8, v8, a0
-; LMULMAX1RV32-NEXT:    vslide1down.vx v8, v8, a0
+; LMULMAX1RV32-NEXT:    vslidedown.vi v8, v8, 1
 ; LMULMAX1RV32-NEXT:    vadd.vv v8, v8, v8
 ; LMULMAX1RV32-NEXT:    vsra.vi v8, v8, 1
 ; LMULMAX1RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
@@ -274,7 +274,7 @@
 ; LMULMAX1RV64-NEXT:    vslide1down.vx v8, v8, a1
 ; LMULMAX1RV64-NEXT:    vslide1down.vx v8, v8, a2
 ; LMULMAX1RV64-NEXT:    vslide1down.vx v8, v8, a0
-; LMULMAX1RV64-NEXT:    vslide1down.vx v8, v8, a0
+; LMULMAX1RV64-NEXT:    vslidedown.vi v8, v8, 1
 ; LMULMAX1RV64-NEXT:    vadd.vv v8, v8, v8
 ; LMULMAX1RV64-NEXT:    vsra.vi v8, v8, 1
 ; LMULMAX1RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
@@ -296,7 +296,7 @@
 ; LMULMAX8RV32-NEXT:    vslide1down.vx v8, v8, a1
 ; LMULMAX8RV32-NEXT:    vslide1down.vx v8, v8, a2
 ; LMULMAX8RV32-NEXT:    vslide1down.vx v8, v8, a0
-; LMULMAX8RV32-NEXT:    vslide1down.vx v8, v8, a0
+; LMULMAX8RV32-NEXT:    vslidedown.vi v8, v8, 1
 ; LMULMAX8RV32-NEXT:    li a0, 127
 ; LMULMAX8RV32-NEXT:    vand.vx v8, v8, a0
 ; LMULMAX8RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
@@ -313,7 +313,7 @@
 ; LMULMAX8RV64-NEXT:    vslide1down.vx v8, v8, a1
 ; LMULMAX8RV64-NEXT:    vslide1down.vx v8, v8, a2
 ; LMULMAX8RV64-NEXT:    vslide1down.vx v8, v8, a0
-; LMULMAX8RV64-NEXT:    vslide1down.vx v8, v8, a0
+; LMULMAX8RV64-NEXT:    vslidedown.vi v8, v8, 1
 ; LMULMAX8RV64-NEXT:    li a0, 127
 ; LMULMAX8RV64-NEXT:    vand.vx v8, v8, a0
 ; LMULMAX8RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
@@ -330,7 +330,7 @@
 ; LMULMAX1RV32-NEXT:    vslide1down.vx v8, v8, a1
 ; LMULMAX1RV32-NEXT:    vslide1down.vx v8, v8, a2
 ; LMULMAX1RV32-NEXT:    vslide1down.vx v8, v8, a0
-; LMULMAX1RV32-NEXT:    vslide1down.vx v8, v8, a0
+; LMULMAX1RV32-NEXT:    vslidedown.vi v8, v8, 1
 ; LMULMAX1RV32-NEXT:    li a0, 127
 ; LMULMAX1RV32-NEXT:    vand.vx v8, v8, a0
 ; LMULMAX1RV32-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
@@ -347,7 +347,7 @@
 ; LMULMAX1RV64-NEXT:    vslide1down.vx v8, v8, a1
 ; LMULMAX1RV64-NEXT:    vslide1down.vx v8, v8, a2
 ; LMULMAX1RV64-NEXT:    vslide1down.vx v8, v8, a0
-; LMULMAX1RV64-NEXT:    vslide1down.vx v8, v8, a0
+; LMULMAX1RV64-NEXT:    vslidedown.vi v8, v8, 1
 ; LMULMAX1RV64-NEXT:    li a0, 127
 ; LMULMAX1RV64-NEXT:    vand.vx v8, v8, a0
 ; LMULMAX1RV64-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-load.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-load.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-load.ll
@@ -48,9 +48,7 @@
 ; RV32-NEXT:    vslide1down.vx v8, v8, a2
 ; RV32-NEXT:    vslide1down.vx v8, v8, a1
 ; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslidedown.vi v8, v8, 3
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: load_v5i8_align1:
@@ -82,9 +80,7 @@
 ; RV64-NEXT:    vslide1down.vx v8, v8, a2
 ; RV64-NEXT:    vslide1down.vx v8, v8, a1
 ; RV64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-NEXT:    vslidedown.vi v8, v8, 3
 ; RV64-NEXT:    ret
   %x = load <5 x i8>, ptr %p, align 1
   ret <5 x i8> %x
@@ -195,8 +191,7 @@
 ; RV32-NEXT:    vslide1down.vx v8, v8, a3
 ; RV32-NEXT:    vslide1down.vx v8, v8, a2
 ; RV32-NEXT:    vslide1down.vx v8, v8, a1
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslidedown.vi v8, v8, 2
 ; RV32-NEXT:    vand.vi v8, v8, 1
 ; RV32-NEXT:    vmsne.vi v0, v8, 0
 ; RV32-NEXT:    ret
@@ -221,8 +216,7 @@
 ; RV64-NEXT:    vslide1down.vx v8, v8, a3
 ; RV64-NEXT:    vslide1down.vx v8, v8, a2
 ; RV64-NEXT:    vslide1down.vx v8, v8, a1
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
-; RV64-NEXT:    vslide1down.vx v8, v8, a0
+; RV64-NEXT:    vslidedown.vi v8, v8, 2
 ; RV64-NEXT:    vand.vi v8, v8, 1
 ; RV64-NEXT:    vmsne.vi v0, v8, 0
 ; RV64-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
@@ -119,7 +119,7 @@
 ; RV32-NEXT:    vslide1down.vx v8, v8, a1
 ; RV32-NEXT:    vslide1down.vx v8, v8, a3
 ; RV32-NEXT:    vslide1down.vx v8, v8, a2
-; RV32-NEXT:    vslide1down.vx v8, v8, a0
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
 ; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; RV32-NEXT:    vse32.v v8, (a0)
 ; RV32-NEXT:    vslidedown.vi v9, v8, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll
@@ -24,8 +24,7 @@
 ; RV32-NEXT:    vslide1down.vx v10, v10, a1
 ; RV32-NEXT:    srli a2, a2, 5
 ; RV32-NEXT:    vslide1down.vx v10, v10, a2
-; RV32-NEXT:    vslide1down.vx v10, v10, a0
-; RV32-NEXT:    vslide1down.vx v10, v10, a0
+; RV32-NEXT:    vslidedown.vi v10, v10, 2
 ; RV32-NEXT:    vand.vi v10, v10, 1
 ; RV32-NEXT:    vmsne.vi v0, v10, 0
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
@@ -60,8 +59,7 @@
 ; RV64-NEXT:    vslide1down.vx v10, v10, a1
 ; RV64-NEXT:    srli a2, a2, 5
 ; RV64-NEXT:    vslide1down.vx v10, v10, a2
-; RV64-NEXT:    vslide1down.vx v10, v10, a0
-; RV64-NEXT:    vslide1down.vx v10, v10, a0
+; RV64-NEXT:    vslidedown.vi v10, v10, 2
 ; RV64-NEXT:    vand.vi v10, v10, 1
 ; RV64-NEXT:    vmsne.vi v0, v10, 0
 ; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
@@ -103,8 +101,7 @@
 ; RV32-NEXT:    vslide1down.vx v10, v10, a1
 ; RV32-NEXT:    srli a2, a2, 5
 ; RV32-NEXT:    vslide1down.vx v10, v10, a2
-; RV32-NEXT:    vslide1down.vx v10, v10, a0
-; RV32-NEXT:    vslide1down.vx v10, v10, a0
+; RV32-NEXT:    vslidedown.vi v10, v10, 2
 ; RV32-NEXT:    vand.vi v10, v10, 1
 ; RV32-NEXT:    vmsne.vi v0, v10, 0
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -139,8 +136,7 @@
 ; RV64-NEXT:    vslide1down.vx v10, v10, a1
 ; RV64-NEXT:    srli a2, a2, 5
 ; RV64-NEXT:    vslide1down.vx v10, v10, a2
-; RV64-NEXT:    vslide1down.vx v10, v10, a0
-; RV64-NEXT:    vslide1down.vx v10, v10, a0
+; RV64-NEXT:    vslidedown.vi v10, v10, 2
 ; RV64-NEXT:    vand.vi v10, v10, 1
 ; RV64-NEXT:    vmsne.vi v0, v10, 0
 ; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -183,8 +179,7 @@
 ; RV32-NEXT:    vslide1down.vx v10, v10, a0
 ; RV32-NEXT:    srli a1, a1, 5
 ; RV32-NEXT:    vslide1down.vx v10, v10, a1
-; RV32-NEXT:    vslide1down.vx v10, v10, a0
-; RV32-NEXT:    vslide1down.vx v10, v10, a0
+; RV32-NEXT:    vslidedown.vi v10, v10, 2
 ; RV32-NEXT:    vand.vi v10, v10, 1
 ; RV32-NEXT:    vmsne.vi v0, v10, 0
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -219,8 +214,7 @@
 ; RV64-NEXT:    vslide1down.vx v10, v10, a0
 ; RV64-NEXT:    srli a1, a1, 5
 ; RV64-NEXT:    vslide1down.vx v10, v10, a1
-; RV64-NEXT:    vslide1down.vx v10, v10, a0
-; RV64-NEXT:    vslide1down.vx v10, v10, a0
+; RV64-NEXT:    vslidedown.vi v10, v10, 2
 ; RV64-NEXT:    vand.vi v10, v10, 1
 ; RV64-NEXT:    vmsne.vi v0, v10, 0
 ; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -264,8 +258,7 @@
 ; RV32-NEXT:    vslide1down.vx v10, v10, a1
 ; RV32-NEXT:    srli a2, a2, 5
 ; RV32-NEXT:    vslide1down.vx v10, v10, a2
-; RV32-NEXT:    vslide1down.vx v10, v10, a0
-; RV32-NEXT:    vslide1down.vx v10, v10, a0
+; RV32-NEXT:    vslidedown.vi v10, v10, 2
 ; RV32-NEXT:    vand.vi v10, v10, 1
 ; RV32-NEXT:    vmsne.vi v0, v10, 0
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
@@ -300,8 +293,7 @@
 ; RV64-NEXT:    vslide1down.vx v10, v10, a1
 ; RV64-NEXT:    srli a2, a2, 5
 ; RV64-NEXT:    vslide1down.vx v10, v10, a2
-; RV64-NEXT:    vslide1down.vx v10, v10, a0
-; RV64-NEXT:    vslide1down.vx v10, v10, a0
+; RV64-NEXT:    vslidedown.vi v10, v10, 2
 ; RV64-NEXT:    vand.vi v10, v10, 1
 ; RV64-NEXT:    vmsne.vi v0, v10, 0
 ; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
@@ -343,8 +335,7 @@
 ; RV32-NEXT:    vslide1down.vx v10, v10, a0
 ; RV32-NEXT:    srli a1, a1, 5
 ; RV32-NEXT:    vslide1down.vx v10, v10, a1
-; RV32-NEXT:    vslide1down.vx v10, v10, a0
-; RV32-NEXT:    vslide1down.vx v10, v10, a0
+; RV32-NEXT:    vslidedown.vi v10, v10, 2
 ; RV32-NEXT:    vand.vi v10, v10, 1
 ; RV32-NEXT:    vmsne.vi v0, v10, 0
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -379,8 +370,7 @@
 ; RV64-NEXT:    vslide1down.vx v10, v10, a0
 ; RV64-NEXT:    srli a1, a1, 5
 ; RV64-NEXT:    vslide1down.vx v10, v10, a1
-; RV64-NEXT:    vslide1down.vx v10, v10, a0
-; RV64-NEXT:    vslide1down.vx v10, v10, a0
+; RV64-NEXT:    vslidedown.vi v10, v10, 2
 ; RV64-NEXT:    vand.vi v10, v10, 1
 ; RV64-NEXT:    vmsne.vi v0, v10, 0
 ; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -423,8 +413,7 @@
 ; RV32-NEXT:    vslide1down.vx v10, v10, a0
 ; RV32-NEXT:    srli a1, a1, 5
 ; RV32-NEXT:    vslide1down.vx v10, v10, a1
-; RV32-NEXT:    vslide1down.vx v10, v10, a0
-; RV32-NEXT:    vslide1down.vx v10, v10, a0
+; RV32-NEXT:    vslidedown.vi v10, v10, 2
 ; RV32-NEXT:    vand.vi v10, v10, 1
 ; RV32-NEXT:    vmsne.vi v0, v10, 0
 ; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
@@ -459,8 +448,7 @@
 ; RV64-NEXT:    vslide1down.vx v10, v10, a0
 ; RV64-NEXT:    srli a1, a1, 5
 ; RV64-NEXT:    vslide1down.vx v10, v10, a1
-; RV64-NEXT:    vslide1down.vx v10, v10, a0
-; RV64-NEXT:    vslide1down.vx v10, v10, a0
+; RV64-NEXT:    vslidedown.vi v10, v10, 2
 ; RV64-NEXT:    vand.vi v10, v10, 1
 ; RV64-NEXT:    vmsne.vi v0, v10, 0
 ; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -656,8 +656,7 @@
 ; RV32MV-NEXT:    vl2r.v v8, (a2) # Unknown-size Folded Reload
 ; RV32MV-NEXT:    vslide1down.vx v8, v8, a0
 ; RV32MV-NEXT:    vslide1down.vx v8, v8, a1
-; RV32MV-NEXT:    vslide1down.vx v8, v8, a0
-; RV32MV-NEXT:    vslide1down.vx v8, v8, a0
+; RV32MV-NEXT:    vslidedown.vi v8, v8, 2
 ; RV32MV-NEXT:    li a0, 85
 ; RV32MV-NEXT:    vmv.s.x v0, a0
 ; RV32MV-NEXT:    vmv.v.i v10, 1
@@ -760,7 +759,7 @@
 ; RV64MV-NEXT:    vslide1down.vx v8, v8, a1
 ; RV64MV-NEXT:    vslide1down.vx v8, v8, a3
 ; RV64MV-NEXT:    vslide1down.vx v8, v8, a2
-; RV64MV-NEXT:    vslide1down.vx v8, v8, a0
+; RV64MV-NEXT:    vslidedown.vi v8, v8, 1
 ; RV64MV-NEXT:    lui a1, %hi(.LCPI3_3)
 ; RV64MV-NEXT:    addi a1, a1, %lo(.LCPI3_3)
 ; RV64MV-NEXT:    vle64.v v10, (a1)
diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
--- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -537,7 +537,7 @@
 ; RV32MV-NEXT:    lui a1, %hi(.LCPI4_0)
 ; RV32MV-NEXT:    addi a1, a1, %lo(.LCPI4_0)
 ; RV32MV-NEXT:    vle16.v v9, (a1)
-; RV32MV-NEXT:    vslide1down.vx v8, v8, a0
+; RV32MV-NEXT:    vslidedown.vi v8, v8, 1
 ; RV32MV-NEXT:    vid.v v10
 ; RV32MV-NEXT:    vsub.vv v8, v8, v10
 ; RV32MV-NEXT:    vmul.vv v8, v8, v9
@@ -598,7 +598,7 @@
 ; RV64MV-NEXT:    lui a1, %hi(.LCPI4_0)
 ; RV64MV-NEXT:    addi a1, a1, %lo(.LCPI4_0)
 ; RV64MV-NEXT:    vle16.v v9, (a1)
-; RV64MV-NEXT:    vslide1down.vx v8, v8, a0
+; RV64MV-NEXT:    vslidedown.vi v8, v8, 1
 ; RV64MV-NEXT:    vid.v v10
 ; RV64MV-NEXT:    vsub.vv v8, v8, v10
 ; RV64MV-NEXT:    vmul.vv v8, v8, v9