Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4779,6 +4779,9 @@
       SDValue VL = getVLOperand(Op);
       SDValue I32VL =
          DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
+      SDValue Overflow = DAG.getSetCC(DL, XLenVT, I32VL, VL, ISD::SETULT);
+      I32VL = DAG.getSelect(DL, XLenVT, Overflow,
+                            DAG.getConstant(0xFFFFFFFF, DL, XLenVT), I32VL);
 
       MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
       SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
Index: llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll
+++ llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll
@@ -13,7 +13,12 @@
 define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a3, a2, 1
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB0_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB0_2: # %entry
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, mu
 ; CHECK-NEXT:    vslide1down.vx v9, v9, a0
 ; CHECK-NEXT:    vslide1down.vx v9, v9, a1
@@ -34,7 +39,12 @@
 define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a3, a2, 1
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB1_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB1_2: # %entry
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, mu
 ; CHECK-NEXT:    vslide1down.vx v9, v9, a0
 ; CHECK-NEXT:    vslide1down.vx v9, v9, a1
@@ -57,7 +67,12 @@
 define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a3, a2, 1
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB2_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB2_2: # %entry
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, mu
 ; CHECK-NEXT:    vslide1down.vx v9, v9, a0
 ; CHECK-NEXT:    vslide1down.vx v9, v9, a1
@@ -79,8 +94,13 @@
 define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a2, a2, 1
-; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB3_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB3_2: # %entry
+; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-NEXT:    ret
@@ -98,8 +118,13 @@
 define <vscale x 1 x i64> @intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a2, a2, 1
-; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB4_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB4_2: # %entry
+; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-NEXT:    ret
Index: llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
+++ llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
@@ -886,8 +886,13 @@
 define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
 ; RV32-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64:
 ; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    slli a2, a2, 1
-; RV32-NEXT:    vsetvli zero, a2, e32, m1, tu, mu
+; RV32-NEXT:    slli a4, a2, 1
+; RV32-NEXT:    li a3, -1
+; RV32-NEXT:    bltu a4, a2, .LBB31_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    mv a3, a4
+; RV32-NEXT:  .LBB31_2: # %entry
+; RV32-NEXT:    vsetvli zero, a3, e32, m1, tu, mu
 ; RV32-NEXT:    vmv1r.v v10, v8
 ; RV32-NEXT:    vslide1down.vx v10, v9, a0
 ; RV32-NEXT:    vslide1down.vx v8, v10, a1
@@ -917,8 +922,13 @@
 define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, iXLen %3) nounwind {
 ; RV32-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
 ; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    slli a2, a2, 1
-; RV32-NEXT:    vsetvli zero, a2, e32, m1, tu, mu
+; RV32-NEXT:    slli a4, a2, 1
+; RV32-NEXT:    li a3, -1
+; RV32-NEXT:    bltu a4, a2, .LBB32_2
+; RV32-NEXT:  # %bb.1: # %entry
+; RV32-NEXT:    mv a3, a4
+; RV32-NEXT:  .LBB32_2: # %entry
+; RV32-NEXT:    vsetvli zero, a3, e32, m1, tu, mu
 ; RV32-NEXT:    vmv1r.v v10, v8
 ; RV32-NEXT:    vslide1up.vx v10, v9, a1
 ; RV32-NEXT:    vslide1up.vx v8, v10, a0
Index: llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
+++ llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
@@ -856,8 +856,13 @@
 define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a2, a2, 1
-; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB36_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB36_2: # %entry
+; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-NEXT:    ret
@@ -882,7 +887,12 @@
 define <vscale x 1 x i64> @intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a3, a2, 1
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB37_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB37_2: # %entry
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, mu
 ; CHECK-NEXT:    vslide1down.vx v9, v9, a0
 ; CHECK-NEXT:    vslide1down.vx v9, v9, a1
@@ -909,8 +919,13 @@
 define <vscale x 2 x i64> @intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a2, a2, 1
-; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB38_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB38_2: # %entry
+; CHECK-NEXT:    vsetvli zero, a3, e32, m2, ta, mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-NEXT:    ret
@@ -935,7 +950,12 @@
 define <vscale x 2 x i64> @intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a3, a2, 1
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB39_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB39_2: # %entry
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m2, ta, mu
 ; CHECK-NEXT:    vslide1down.vx v10, v10, a0
 ; CHECK-NEXT:    vslide1down.vx v10, v10, a1
@@ -962,8 +982,13 @@
 define <vscale x 4 x i64> @intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a2, a2, 1
-; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB40_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB40_2: # %entry
+; CHECK-NEXT:    vsetvli zero, a3, e32, m4, ta, mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-NEXT:    ret
@@ -988,7 +1013,12 @@
 define <vscale x 4 x i64> @intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a3, a2, 1
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB41_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB41_2: # %entry
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m4, ta, mu
 ; CHECK-NEXT:    vslide1down.vx v12, v12, a0
 ; CHECK-NEXT:    vslide1down.vx v12, v12, a1
@@ -1015,8 +1045,13 @@
 define <vscale x 8 x i64> @intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a2, a2, 1
-; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB42_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB42_2: # %entry
+; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-NEXT:    ret
@@ -1041,7 +1076,12 @@
 define <vscale x 8 x i64> @intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a3, a2, 1
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB43_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB43_2: # %entry
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
 ; CHECK-NEXT:    vslide1down.vx v16, v16, a0
 ; CHECK-NEXT:    vslide1down.vx v16, v16, a1
Index: llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
+++ llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
@@ -874,8 +874,13 @@
 define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a2, a2, 1
-; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB36_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB36_2: # %entry
+; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, mu
 ; CHECK-NEXT:    vslide1up.vx v9, v8, a1
 ; CHECK-NEXT:    vslide1up.vx v8, v9, a0
 ; CHECK-NEXT:    ret
@@ -900,7 +905,12 @@
 define <vscale x 1 x i64> @intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a3, a2, 1
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB37_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB37_2: # %entry
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m1, ta, mu
 ; CHECK-NEXT:    vslide1up.vx v10, v9, a1
 ; CHECK-NEXT:    vslide1up.vx v9, v10, a0
@@ -927,8 +937,13 @@
 define <vscale x 2 x i64> @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a2, a2, 1
-; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB38_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB38_2: # %entry
+; CHECK-NEXT:    vsetvli zero, a3, e32, m2, ta, mu
 ; CHECK-NEXT:    vslide1up.vx v10, v8, a1
 ; CHECK-NEXT:    vslide1up.vx v8, v10, a0
 ; CHECK-NEXT:    ret
@@ -953,7 +968,12 @@
 define <vscale x 2 x i64> @intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a3, a2, 1
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB39_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB39_2: # %entry
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m2, ta, mu
 ; CHECK-NEXT:    vslide1up.vx v12, v10, a1
 ; CHECK-NEXT:    vslide1up.vx v10, v12, a0
@@ -980,8 +1000,13 @@
 define <vscale x 4 x i64> @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a2, a2, 1
-; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB40_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB40_2: # %entry
+; CHECK-NEXT:    vsetvli zero, a3, e32, m4, ta, mu
 ; CHECK-NEXT:    vslide1up.vx v12, v8, a1
 ; CHECK-NEXT:    vslide1up.vx v8, v12, a0
 ; CHECK-NEXT:    ret
@@ -1006,7 +1031,12 @@
 define <vscale x 4 x i64> @intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a3, a2, 1
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB41_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB41_2: # %entry
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m4, ta, mu
 ; CHECK-NEXT:    vslide1up.vx v16, v12, a1
 ; CHECK-NEXT:    vslide1up.vx v12, v16, a0
@@ -1033,8 +1063,13 @@
 define <vscale x 8 x i64> @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a2, a2, 1
-; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB42_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB42_2: # %entry
+; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
 ; CHECK-NEXT:    vslide1up.vx v16, v8, a1
 ; CHECK-NEXT:    vslide1up.vx v8, v16, a0
 ; CHECK-NEXT:    ret
@@ -1059,7 +1094,12 @@
 define <vscale x 8 x i64> @intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    slli a3, a2, 1
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    li a3, -1
+; CHECK-NEXT:    bltu a4, a2, .LBB43_2
+; CHECK-NEXT:  # %bb.1: # %entry
+; CHECK-NEXT:    mv a3, a4
+; CHECK-NEXT:  .LBB43_2: # %entry
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m8, ta, mu
 ; CHECK-NEXT:    vslide1up.vx v24, v16, a1
 ; CHECK-NEXT:    vslide1up.vx v16, v24, a0
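
Aside (illustration only, not part of the patch): a minimal scalar C++ sketch of what the new SETULT/select sequence in RISCVISelLowering.cpp computes, assuming RV32 where XLenVT is i32; the helper name below is hypothetical. Doubling VL to compensate for halving SEW to e32 can wrap around in 32 bits, so the lowering saturates the doubled value to 0xFFFFFFFF, which is what the added li/bltu/mv lines in the CHECK output perform.

// Scalar model of the overflow-saturating VL doubling.
#include <cstdint>

static uint32_t doubleVLSaturating(uint32_t VL) {
  uint32_t I32VL = VL << 1;               // ISD::SHL: double the requested VL
  bool Overflow = I32VL < VL;             // ISD::SETULT: did the doubling wrap?
  return Overflow ? UINT32_MAX : I32VL;   // select 0xFFFFFFFF when it wrapped
}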