diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -64,6 +64,7 @@
   bool selectVSplat(SDValue N, SDValue &SplatVal);
   bool selectVSplatSimm5(SDValue N, SDValue &SplatVal);
   bool selectVSplatUimm5(SDValue N, SDValue &SplatVal);
+  bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal);
   bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm);
   template bool selectRVVSimm5(SDValue N, SDValue &Imm) {
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -1150,7 +1150,12 @@
   return true;
 }

-bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
+using ValidateFn = bool (*)(int64_t);
+
+static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
+                                   SelectionDAG &DAG,
+                                   const RISCVSubtarget &Subtarget,
+                                   ValidateFn ValidateImm) {
   if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
        N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
        N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
@@ -1159,28 +1164,38 @@
   int64_t SplatImm = cast(N.getOperand(0))->getSExtValue();

-  // Both ISD::SPLAT_VECTOR and RISCVISD::SPLAT_VECTOR_I64 share semantics when
-  // the operand type is wider than the resulting vector element type: an
-  // implicit truncation first takes place. Therefore, perform a manual
-  // truncation/sign-extension in order to ignore any truncated bits and catch
-  // any zero-extended immediate.
+  // ISD::SPLAT_VECTOR, RISCVISD::SPLAT_VECTOR_I64 and RISCVISD::VMV_V_X_VL
+  // share semantics when the operand type is wider than the resulting vector
+  // element type: an implicit truncation first takes place. Therefore, perform
+  // a manual truncation/sign-extension in order to ignore any truncated bits
+  // and catch any zero-extended immediate.
   // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
   // sign-extending to (XLenVT -1).
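  // Illustrative sketch (not part of this change): assuming an e8 splat whose
  // XLenVT operand holds the zero-extended value 255, the manual
  // truncation/sign-extension described above recovers -1, which then passes
  // the simm5 check:
  //   int64_t SplatImm = 255;               // (XLenVT 255) splatted into i8 lanes
  //   SplatImm = SignExtend64(SplatImm, 8); // sign-extend the low 8 bits -> -1
  //   assert(isInt<5>(SplatImm));           // -1 is a valid simm5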
-  MVT XLenVT = Subtarget->getXLenVT();
+  MVT XLenVT = Subtarget.getXLenVT();
   assert(XLenVT == N.getOperand(0).getSimpleValueType() &&
          "Unexpected splat operand type");
   MVT EltVT = N.getSimpleValueType().getVectorElementType();
-  if (EltVT.bitsLT(XLenVT)) {
+  if (EltVT.bitsLT(XLenVT))
     SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
-  }

-  if (!isInt<5>(SplatImm))
+  if (!ValidateImm(SplatImm))
     return false;

-  SplatVal = CurDAG->getTargetConstant(SplatImm, SDLoc(N), XLenVT);
+  SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
   return true;
 }

+bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
+  return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
+                                [](int64_t Imm) { return isInt<5>(Imm); });
+}
+
+bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
+  return selectVSplatSimmHelper(
+      N, SplatVal, *CurDAG, *Subtarget,
+      [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
+}
+
 bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
   if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
        N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -32,6 +32,8 @@
 def SplatPat : ComplexPattern;
 def SplatPat_simm5 : ComplexPattern;
 def SplatPat_uimm5 : ComplexPattern;
+def SplatPat_simm5_plus1
+    : ComplexPattern;

 class SwapHelper {
   dag Value = !con(Prefix, !if(swap, B, A), !if(swap, A, B), Suffix);
 }
@@ -255,6 +257,18 @@
                              SplatPat_simm5, simm5, swap>;
 }

+multiclass VPatIntegerSetCCSDNode_VIPlus1 {
+  foreach vti = AllIntegerVectors in {
+    defvar instruction = !cast(instruction_name#"_VI_"#vti.LMul.MX);
+    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
+                               (vti.Vector (SplatPat_simm5_plus1 simm5_plus1:$rs2)),
+                               cc)),
+              (instruction vti.RegClass:$rs1, (DecImm simm5_plus1:$rs2),
+               vti.AVL, vti.SEW)>;
+  }
+}
+
 multiclass VPatFPSetCCSDNode_VV_VF_FV {
@@ -413,10 +427,10 @@
 defm "" : VPatIntegerSetCCSDNode_VV_VX_VI;
 defm "" : VPatIntegerSetCCSDNode_VV_VX_VI;
-// FIXME: Support immediate forms of these by choosing SLE decrementing the
-// immediate
 defm "" : VPatIntegerSetCCSDNode_VV_VX;
 defm "" : VPatIntegerSetCCSDNode_VV_VX;
+defm "" : VPatIntegerSetCCSDNode_VIPlus1;
+defm "" : VPatIntegerSetCCSDNode_VIPlus1;
 defm "" : VPatIntegerSetCCSDNode_VV;
 defm "" : VPatIntegerSetCCSDNode_VV;
@@ -426,10 +440,10 @@
 defm "" : VPatIntegerSetCCSDNode_VV_VX_VI;
 defm "" : VPatIntegerSetCCSDNode_VV_VX_VI;
-// FIXME: Support immediate forms of these by choosing SGT and decrementing the
-// immediate
 defm "" : VPatIntegerSetCCSDNode_VV;
 defm "" : VPatIntegerSetCCSDNode_VV;
+defm "" : VPatIntegerSetCCSDNode_VIPlus1;
+defm "" : VPatIntegerSetCCSDNode_VIPlus1;

 // 12.9. Vector Integer Min/Max Instructions
 defm "" : VPatBinarySDNode_VV_VX;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -396,7 +396,6 @@
 multiclass VPatIntegerSetCCVL_VI_Swappable {
   defvar instruction = !cast(instruction_name#"_VI_"#vti.LMul.MX);
-  defvar ImmPat = !cast("sew"#vti.SEW#"simm5");
   def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
                                       (SplatPat_simm5 simm5:$rs2), cc,
                                       (vti.Mask true_mask),
@@ -409,6 +408,17 @@
             (instruction vti.RegClass:$rs1, simm5:$rs2, GPR:$vl, vti.SEW)>;
 }

+multiclass VPatIntegerSetCCVL_VIPlus1 {
+  defvar instruction = !cast(instruction_name#"_VI_"#vti.LMul.MX);
+  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
+                                      (SplatPat_simm5_plus1 simm5:$rs2), cc,
+                                      (vti.Mask true_mask),
+                                      (XLenVT (VLOp GPR:$vl)))),
+            (instruction vti.RegClass:$rs1, (DecImm simm5:$rs2),
+             GPR:$vl, vti.SEW)>;
+}
+
 multiclass VPatFPSetCCVL_VV_VF_FV {
@@ -637,12 +647,15 @@
   defm "" : VPatIntegerSetCCVL_VX_Swappable;
   // There is no VMSGE(U)_VX instruction
-  // FIXME: Support immediate forms of these by choosing SGT and decrementing
-  // the immediate
   defm "" : VPatIntegerSetCCVL_VI_Swappable;
   defm "" : VPatIntegerSetCCVL_VI_Swappable;
   defm "" : VPatIntegerSetCCVL_VI_Swappable;
   defm "" : VPatIntegerSetCCVL_VI_Swappable;
+
+  defm "" : VPatIntegerSetCCVL_VIPlus1;
+  defm "" : VPatIntegerSetCCVL_VIPlus1;
+  defm "" : VPatIntegerSetCCVL_VIPlus1;
+  defm "" : VPatIntegerSetCCVL_VIPlus1;
 } // foreach vti = AllIntegerVectors

 // 12.9. Vector Integer Min/Max Instructions
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll
@@ -570,7 +570,7 @@
 ; CHECK-NEXT: addi a2, zero, 128
 ; CHECK-NEXT: vsetvli a2, a2, e8,m8,ta,mu
 ; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vmslt.vx v25, v8, zero
+; CHECK-NEXT: vmsle.vi v25, v8, -1
 ; CHECK-NEXT: vse1.v v25, (a1)
 ; CHECK-NEXT: ret
   %a = load <128 x i8>, <128 x i8>* %x
@@ -586,9 +586,8 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli a2, 8, e8,m1,ta,mu
 ; CHECK-NEXT: vle8.v v25, (a0)
-; CHECK-NEXT: vmv.v.i v26, 0
-; CHECK-NEXT: vmsle.vv v27, v26, v25
-; CHECK-NEXT: vse1.v v27, (a1)
+; CHECK-NEXT: vmsgt.vi v26, v25, -1
+; CHECK-NEXT: vse1.v v26, (a1)
 ; CHECK-NEXT: ret
   %a = load <8 x i8>, <8 x i8>* %x
   %b = insertelement <8 x i8> undef, i8 0, i32 0
@@ -638,8 +637,7 @@
 ; CHECK-NEXT: addi a2, zero, 64
 ; CHECK-NEXT: vsetvli a2, a2, e8,m4,ta,mu
 ; CHECK-NEXT: vle8.v v28, (a0)
-; CHECK-NEXT: addi a0, zero, 5
-; CHECK-NEXT: vmsltu.vx v25, v28, a0
+; CHECK-NEXT: vmsleu.vi v25, v28, 4
 ; CHECK-NEXT: vse1.v v25, (a1)
 ; CHECK-NEXT: ret
   %a = load <64 x i8>, <64 x i8>* %x
@@ -656,8 +654,7 @@
 ; CHECK-NEXT: addi a2, zero, 128
 ; CHECK-NEXT: vsetvli a2, a2, e8,m8,ta,mu
 ; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vmv.v.i v16, 5
-; CHECK-NEXT: vmsleu.vv v25, v16, v8
+; CHECK-NEXT: vmsgtu.vi v25, v8, 4
 ; CHECK-NEXT: vse1.v v25, (a1)
 ; CHECK-NEXT: ret
   %a = load <128 x i8>, <128 x i8>* %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll
@@ -7,13 +7,13 @@
 ; CHECK-LABEL: saddo_nvx2i32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu
-; CHECK-NEXT: vmslt.vx v25, v9, zero
-; CHECK-NEXT: vadd.vv v26, v8, v9
-; CHECK-NEXT: vmslt.vv v27, v26, v8
+; CHECK-NEXT: vadd.vv v25, v8, v9
+; CHECK-NEXT: vmslt.vv v26, v25, v8
+; CHECK-NEXT: vmsle.vi v27, v9, -1
 ; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu
-; CHECK-NEXT: vmxor.mm v0, v25, v27
+; CHECK-NEXT: vmxor.mm v0, v27, v26
 ; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
 ; CHECK-NEXT: ret
   %a = call { , } @llvm.sadd.with.overflow.nxv2i32( %x, %y)
   %b = extractvalue { , } %a, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll
@@ -223,8 +223,7 @@
 ; CHECK-LABEL: icmp_uge_vi_nxv8i8_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.i v25, 15
-; CHECK-NEXT: vmsleu.vv v0, v25, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, 14
 ; CHECK-NEXT: ret
   %head = insertelement undef, i8 15, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
@@ -260,8 +259,7 @@
 ; CHECK-LABEL: icmp_uge_vi_nxv8i8_3:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.i v25, 1
-; CHECK-NEXT: vmsleu.vv v0, v25, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, 0
 ; CHECK-NEXT: ret
   %head = insertelement undef, i8 1, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
@@ -273,8 +271,7 @@
 ; CHECK-LABEL: icmp_uge_vi_nxv8i8_4:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.i v25, -15
-; CHECK-NEXT: vmsleu.vv v0, v25, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, -16
 ; CHECK-NEXT: ret
   %head = insertelement undef, i8 -15, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
@@ -285,10 +282,8 @@
 define @icmp_uge_vi_nxv8i8_5( %va) {
 ; CHECK-LABEL: icmp_uge_vi_nxv8i8_5:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.x v25, a0
-; CHECK-NEXT: vmsleu.vv v0, v25, v8
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v8, 15
 ; CHECK-NEXT: ret
   %head = insertelement undef, i8 16, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
@@ -347,9 +342,8 @@
 define @icmp_ult_vi_nxv8i8_1( %va) {
 ; CHECK-LABEL: icmp_ult_vi_nxv8i8_1:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, -15
-; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v8, -16
 ; CHECK-NEXT: ret
   %head = insertelement undef, i8 -15, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
@@ -396,9 +390,8 @@
 define @icmp_ult_vi_nxv8i8_4( %va) {
 ; CHECK-LABEL: icmp_ult_vi_nxv8i8_4:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v8, 15
 ; CHECK-NEXT: ret
   %head = insertelement undef, i8 16, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
@@ -553,8 +546,7 @@
 ; CHECK-LABEL: icmp_sge_vi_nxv8i8_1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.i v25, -15
-; CHECK-NEXT: vmsle.vv v0, v25, v8
+; CHECK-NEXT: vmsgt.vi v0, v8, -16
 ; CHECK-NEXT: ret
   %head = insertelement undef, i8 -15, i32 0
   %splat = shufflevector %head, undef, zeroinitializer
@@ -578,8 +570,7 @@
 ; CHECK-LABEL: icmp_sge_vi_nxv8i8_2:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.i v25, 0 -; CHECK-NEXT: vmsle.vv v0, v25, v8 +; CHECK-NEXT: vmsgt.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement undef, i8 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -590,10 +581,8 @@ define @icmp_sge_vi_nxv8i8_3( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i8_3: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu -; CHECK-NEXT: vmv.v.x v25, a0 -; CHECK-NEXT: vmsle.vv v0, v25, v8 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -652,9 +641,8 @@ define @icmp_slt_vi_nxv8i8_1( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -15 -; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i8 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -678,7 +666,7 @@ ; CHECK-LABEL: icmp_slt_vi_nxv8i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, zero +; CHECK-NEXT: vmsle.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement undef, i8 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -689,9 +677,8 @@ define @icmp_slt_vi_nxv8i8_3( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i8_3: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -964,8 +951,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i16_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu -; CHECK-NEXT: vmv.v.i v26, 15 -; CHECK-NEXT: vmsleu.vv v0, v26, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, 14 ; CHECK-NEXT: ret %head = insertelement undef, i16 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1001,8 +987,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i16_3: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu -; CHECK-NEXT: vmv.v.i v26, 1 -; CHECK-NEXT: vmsleu.vv v0, v26, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1014,8 +999,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i16_4: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu -; CHECK-NEXT: vmv.v.i v26, -15 -; CHECK-NEXT: vmsleu.vv v0, v26, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1026,10 +1010,8 @@ define @icmp_uge_vi_nxv8i16_5( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i16_5: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu -; CHECK-NEXT: vmv.v.x v26, a0 -; CHECK-NEXT: vmsleu.vv v0, v26, v8 +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1088,9 +1070,8 @@ define @icmp_ult_vi_nxv8i16_1( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -15 -; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu -; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, 
e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1137,9 +1118,8 @@ define @icmp_ult_vi_nxv8i16_4( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i16_4: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu -; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1294,8 +1274,7 @@ ; CHECK-LABEL: icmp_sge_vi_nxv8i16_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu -; CHECK-NEXT: vmv.v.i v26, -15 -; CHECK-NEXT: vmsle.vv v0, v26, v8 +; CHECK-NEXT: vmsgt.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1319,8 +1298,7 @@ ; CHECK-LABEL: icmp_sge_vi_nxv8i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu -; CHECK-NEXT: vmv.v.i v26, 0 -; CHECK-NEXT: vmsle.vv v0, v26, v8 +; CHECK-NEXT: vmsgt.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement undef, i16 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1331,10 +1309,8 @@ define @icmp_sge_vi_nxv8i16_3( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i16_3: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu -; CHECK-NEXT: vmv.v.x v26, a0 -; CHECK-NEXT: vmsle.vv v0, v26, v8 +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1393,9 +1369,8 @@ define @icmp_slt_vi_nxv8i16_1( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -15 -; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1419,7 +1394,7 @@ ; CHECK-LABEL: icmp_slt_vi_nxv8i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, zero +; CHECK-NEXT: vmsle.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement undef, i16 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1430,9 +1405,8 @@ define @icmp_slt_vi_nxv8i16_3( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i16_3: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1705,8 +1679,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i32_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu -; CHECK-NEXT: vmv.v.i v28, 15 -; CHECK-NEXT: vmsleu.vv v0, v28, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, 14 ; CHECK-NEXT: ret %head = insertelement undef, i32 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1742,8 +1715,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i32_3: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu -; CHECK-NEXT: vmv.v.i v28, 1 -; CHECK-NEXT: vmsleu.vv v0, v28, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 1, i32 0 %splat = shufflevector %head, undef, 
zeroinitializer @@ -1755,8 +1727,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i32_4: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu -; CHECK-NEXT: vmv.v.i v28, -15 -; CHECK-NEXT: vmsleu.vv v0, v28, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1767,10 +1738,8 @@ define @icmp_uge_vi_nxv8i32_5( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i32_5: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu -; CHECK-NEXT: vmv.v.x v28, a0 -; CHECK-NEXT: vmsleu.vv v0, v28, v8 +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1829,9 +1798,8 @@ define @icmp_ult_vi_nxv8i32_1( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -15 -; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu -; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1878,9 +1846,8 @@ define @icmp_ult_vi_nxv8i32_4( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i32_4: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu -; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2035,8 +2002,7 @@ ; CHECK-LABEL: icmp_sge_vi_nxv8i32_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu -; CHECK-NEXT: vmv.v.i v28, -15 -; CHECK-NEXT: vmsle.vv v0, v28, v8 +; CHECK-NEXT: vmsgt.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2060,8 +2026,7 @@ ; CHECK-LABEL: icmp_sge_vi_nxv8i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu -; CHECK-NEXT: vmv.v.i v28, 0 -; CHECK-NEXT: vmsle.vv v0, v28, v8 +; CHECK-NEXT: vmsgt.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement undef, i32 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2072,10 +2037,8 @@ define @icmp_sge_vi_nxv8i32_3( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i32_3: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu -; CHECK-NEXT: vmv.v.x v28, a0 -; CHECK-NEXT: vmsle.vv v0, v28, v8 +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2134,9 +2097,8 @@ define @icmp_slt_vi_nxv8i32_1( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -15 -; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2160,7 +2122,7 @@ ; CHECK-LABEL: icmp_slt_vi_nxv8i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, zero +; CHECK-NEXT: vmsle.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement undef, i32 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ 
-2171,9 +2133,8 @@ define @icmp_slt_vi_nxv8i32_3( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i32_3: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2497,8 +2458,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i64_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu -; CHECK-NEXT: vmv.v.i v16, 15 -; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, 14 ; CHECK-NEXT: ret %head = insertelement undef, i64 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2534,8 +2494,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i64_3: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu -; CHECK-NEXT: vmv.v.i v16, 1 -; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2547,8 +2506,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i64_4: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu -; CHECK-NEXT: vmv.v.i v16, -15 -; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2559,10 +2517,8 @@ define @icmp_uge_vi_nxv8i64_5( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i64_5: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu -; CHECK-NEXT: vmv.v.x v16, a0 -; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2634,9 +2590,8 @@ define @icmp_ult_vi_nxv8i64_1( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -15 -; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu -; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2683,9 +2638,8 @@ define @icmp_ult_vi_nxv8i64_4( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i64_4: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu -; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2878,8 +2832,7 @@ ; CHECK-LABEL: icmp_sge_vi_nxv8i64_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu -; CHECK-NEXT: vmv.v.i v16, -15 -; CHECK-NEXT: vmsle.vv v0, v16, v8 +; CHECK-NEXT: vmsgt.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2903,8 +2856,7 @@ ; CHECK-LABEL: icmp_sge_vi_nxv8i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu -; CHECK-NEXT: vmv.v.i v16, 0 -; CHECK-NEXT: vmsle.vv v0, v16, v8 +; CHECK-NEXT: vmsgt.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement undef, i64 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2915,10 +2867,8 @@ define @icmp_sge_vi_nxv8i64_3( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i64_3: ; CHECK: # 
%bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu -; CHECK-NEXT: vmv.v.x v16, a0 -; CHECK-NEXT: vmsle.vv v0, v16, v8 +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2990,9 +2940,8 @@ define @icmp_slt_vi_nxv8i64_1( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -15 -; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsle.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -3016,7 +2965,7 @@ ; CHECK-LABEL: icmp_slt_vi_nxv8i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, zero +; CHECK-NEXT: vmsle.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement undef, i64 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -3027,9 +2976,8 @@ define @icmp_slt_vi_nxv8i64_3( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i64_3: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsle.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll @@ -223,8 +223,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i8_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu -; CHECK-NEXT: vmv.v.i v25, 15 -; CHECK-NEXT: vmsleu.vv v0, v25, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, 14 ; CHECK-NEXT: ret %head = insertelement undef, i8 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -260,8 +259,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i8_3: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu -; CHECK-NEXT: vmv.v.i v25, 1 -; CHECK-NEXT: vmsleu.vv v0, v25, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, 0 ; CHECK-NEXT: ret %head = insertelement undef, i8 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -273,8 +271,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i8_4: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu -; CHECK-NEXT: vmv.v.i v25, -15 -; CHECK-NEXT: vmsleu.vv v0, v25, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i8 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -285,10 +282,8 @@ define @icmp_uge_vi_nxv8i8_5( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i8_5: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu -; CHECK-NEXT: vmv.v.x v25, a0 -; CHECK-NEXT: vmsleu.vv v0, v25, v8 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -347,9 +342,8 @@ define @icmp_ult_vi_nxv8i8_1( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -15 -; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu -; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, 
i8 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -396,9 +390,8 @@ define @icmp_ult_vi_nxv8i8_4( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i8_4: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu -; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -553,8 +546,7 @@ ; CHECK-LABEL: icmp_sge_vi_nxv8i8_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu -; CHECK-NEXT: vmv.v.i v25, -15 -; CHECK-NEXT: vmsle.vv v0, v25, v8 +; CHECK-NEXT: vmsgt.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i8 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -578,8 +570,7 @@ ; CHECK-LABEL: icmp_sge_vi_nxv8i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu -; CHECK-NEXT: vmv.v.i v25, 0 -; CHECK-NEXT: vmsle.vv v0, v25, v8 +; CHECK-NEXT: vmsgt.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement undef, i8 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -590,10 +581,8 @@ define @icmp_sge_vi_nxv8i8_3( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i8_3: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu -; CHECK-NEXT: vmv.v.x v25, a0 -; CHECK-NEXT: vmsle.vv v0, v25, v8 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -652,9 +641,8 @@ define @icmp_slt_vi_nxv8i8_1( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i8_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -15 -; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i8 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -678,7 +666,7 @@ ; CHECK-LABEL: icmp_slt_vi_nxv8i8_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, zero +; CHECK-NEXT: vmsle.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement undef, i8 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -689,9 +677,8 @@ define @icmp_slt_vi_nxv8i8_3( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i8_3: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i8 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -964,8 +951,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i16_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu -; CHECK-NEXT: vmv.v.i v26, 15 -; CHECK-NEXT: vmsleu.vv v0, v26, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, 14 ; CHECK-NEXT: ret %head = insertelement undef, i16 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1001,8 +987,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i16_3: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu -; CHECK-NEXT: vmv.v.i v26, 1 -; CHECK-NEXT: vmsleu.vv v0, v26, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, 0 ; CHECK-NEXT: ret %head = insertelement undef, i16 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1014,8 +999,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i16_4: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu -; 
CHECK-NEXT: vmv.v.i v26, -15 -; CHECK-NEXT: vmsleu.vv v0, v26, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1026,10 +1010,8 @@ define @icmp_uge_vi_nxv8i16_5( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i16_5: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu -; CHECK-NEXT: vmv.v.x v26, a0 -; CHECK-NEXT: vmsleu.vv v0, v26, v8 +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1088,9 +1070,8 @@ define @icmp_ult_vi_nxv8i16_1( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -15 -; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu -; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1137,9 +1118,8 @@ define @icmp_ult_vi_nxv8i16_4( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i16_4: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu -; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1294,8 +1274,7 @@ ; CHECK-LABEL: icmp_sge_vi_nxv8i16_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu -; CHECK-NEXT: vmv.v.i v26, -15 -; CHECK-NEXT: vmsle.vv v0, v26, v8 +; CHECK-NEXT: vmsgt.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1319,8 +1298,7 @@ ; CHECK-LABEL: icmp_sge_vi_nxv8i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu -; CHECK-NEXT: vmv.v.i v26, 0 -; CHECK-NEXT: vmsle.vv v0, v26, v8 +; CHECK-NEXT: vmsgt.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement undef, i16 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1331,10 +1309,8 @@ define @icmp_sge_vi_nxv8i16_3( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i16_3: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu -; CHECK-NEXT: vmv.v.x v26, a0 -; CHECK-NEXT: vmsle.vv v0, v26, v8 +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1393,9 +1369,8 @@ define @icmp_slt_vi_nxv8i16_1( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i16_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -15 -; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i16 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1419,7 +1394,7 @@ ; CHECK-LABEL: icmp_slt_vi_nxv8i16_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, zero +; CHECK-NEXT: vmsle.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement undef, i16 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1430,9 +1405,8 @@ define @icmp_slt_vi_nxv8i16_3( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i16_3: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 
16 -; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i16 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1705,8 +1679,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i32_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu -; CHECK-NEXT: vmv.v.i v28, 15 -; CHECK-NEXT: vmsleu.vv v0, v28, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, 14 ; CHECK-NEXT: ret %head = insertelement undef, i32 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1742,8 +1715,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i32_3: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu -; CHECK-NEXT: vmv.v.i v28, 1 -; CHECK-NEXT: vmsleu.vv v0, v28, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, 0 ; CHECK-NEXT: ret %head = insertelement undef, i32 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1755,8 +1727,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i32_4: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu -; CHECK-NEXT: vmv.v.i v28, -15 -; CHECK-NEXT: vmsleu.vv v0, v28, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1767,10 +1738,8 @@ define @icmp_uge_vi_nxv8i32_5( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i32_5: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu -; CHECK-NEXT: vmv.v.x v28, a0 -; CHECK-NEXT: vmsleu.vv v0, v28, v8 +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1829,9 +1798,8 @@ define @icmp_ult_vi_nxv8i32_1( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -15 -; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu -; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -1878,9 +1846,8 @@ define @icmp_ult_vi_nxv8i32_4( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i32_4: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu -; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2035,8 +2002,7 @@ ; CHECK-LABEL: icmp_sge_vi_nxv8i32_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu -; CHECK-NEXT: vmv.v.i v28, -15 -; CHECK-NEXT: vmsle.vv v0, v28, v8 +; CHECK-NEXT: vmsgt.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2060,8 +2026,7 @@ ; CHECK-LABEL: icmp_sge_vi_nxv8i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu -; CHECK-NEXT: vmv.v.i v28, 0 -; CHECK-NEXT: vmsle.vv v0, v28, v8 +; CHECK-NEXT: vmsgt.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement undef, i32 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2072,10 +2037,8 @@ define @icmp_sge_vi_nxv8i32_3( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i32_3: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu -; CHECK-NEXT: vmv.v.x v28, a0 -; CHECK-NEXT: vmsle.vv v0, 
v28, v8 +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2134,9 +2097,8 @@ define @icmp_slt_vi_nxv8i32_1( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i32_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -15 -; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i32 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2160,7 +2122,7 @@ ; CHECK-LABEL: icmp_slt_vi_nxv8i32_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, zero +; CHECK-NEXT: vmsle.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement undef, i32 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2171,9 +2133,8 @@ define @icmp_slt_vi_nxv8i32_3( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i32_3: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i32 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2446,8 +2407,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i64_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu -; CHECK-NEXT: vmv.v.i v16, 15 -; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, 14 ; CHECK-NEXT: ret %head = insertelement undef, i64 15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2483,8 +2443,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i64_3: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu -; CHECK-NEXT: vmv.v.i v16, 1 -; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, 0 ; CHECK-NEXT: ret %head = insertelement undef, i64 1, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2496,8 +2455,7 @@ ; CHECK-LABEL: icmp_uge_vi_nxv8i64_4: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu -; CHECK-NEXT: vmv.v.i v16, -15 -; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: vmsgtu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2508,10 +2466,8 @@ define @icmp_uge_vi_nxv8i64_5( %va) { ; CHECK-LABEL: icmp_uge_vi_nxv8i64_5: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu -; CHECK-NEXT: vmv.v.x v16, a0 -; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2570,9 +2526,8 @@ define @icmp_ult_vi_nxv8i64_1( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -15 -; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu -; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2619,9 +2574,8 @@ define @icmp_ult_vi_nxv8i64_4( %va) { ; CHECK-LABEL: icmp_ult_vi_nxv8i64_4: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu -; CHECK-NEXT: vmsltu.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu 
+; CHECK-NEXT: vmsleu.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2776,8 +2730,7 @@ ; CHECK-LABEL: icmp_sge_vi_nxv8i64_1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu -; CHECK-NEXT: vmv.v.i v16, -15 -; CHECK-NEXT: vmsle.vv v0, v16, v8 +; CHECK-NEXT: vmsgt.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2801,8 +2754,7 @@ ; CHECK-LABEL: icmp_sge_vi_nxv8i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu -; CHECK-NEXT: vmv.v.i v16, 0 -; CHECK-NEXT: vmsle.vv v0, v16, v8 +; CHECK-NEXT: vmsgt.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement undef, i64 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2813,10 +2765,8 @@ define @icmp_sge_vi_nxv8i64_3( %va) { ; CHECK-LABEL: icmp_sge_vi_nxv8i64_3: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu -; CHECK-NEXT: vmv.v.x v16, a0 -; CHECK-NEXT: vmsle.vv v0, v16, v8 +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2875,9 +2825,8 @@ define @icmp_slt_vi_nxv8i64_1( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i64_1: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, -15 -; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsle.vi v0, v8, -16 ; CHECK-NEXT: ret %head = insertelement undef, i64 -15, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2901,7 +2850,7 @@ ; CHECK-LABEL: icmp_slt_vi_nxv8i64_2: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, zero +; CHECK-NEXT: vmsle.vi v0, v8, -1 ; CHECK-NEXT: ret %head = insertelement undef, i64 0, i32 0 %splat = shufflevector %head, undef, zeroinitializer @@ -2912,9 +2861,8 @@ define @icmp_slt_vi_nxv8i64_3( %va) { ; CHECK-LABEL: icmp_slt_vi_nxv8i64_3: ; CHECK: # %bb.0: -; CHECK-NEXT: addi a0, zero, 16 -; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu -; CHECK-NEXT: vmslt.vx v0, v8, a0 +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsle.vi v0, v8, 15 ; CHECK-NEXT: ret %head = insertelement undef, i64 16, i32 0 %splat = shufflevector %head, undef, zeroinitializer
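
A minimal standalone sketch (not from the patch) of the identity the new simm5_plus1 patterns rely on: for any splat immediate imm in [-15, 16], the range accepted by selectVSplatSimm5Plus1, the decremented value imm - 1 still fits in simm5, and a >=/< comparison against imm is equivalent to a >/<= comparison against imm - 1. This is why vmsge(u).vi / vmslt(u).vi splats can be emitted as vmsgt(u).vi / vmsle(u).vi with DecImm. The wrapping unsigned case (compare against 0) is assumed never to reach these patterns, since unsigned setcc against zero is normally folded to a constant before instruction selection.

// check_simm5_plus1.cpp - illustrative exhaustive check over i8 lane values.
#include <cassert>
#include <cstdint>

int main() {
  for (int imm = -15; imm <= 16; ++imm) {    // range accepted by selectVSplatSimm5Plus1
    assert(imm - 1 >= -16 && imm - 1 <= 15); // decremented immediate fits simm5
    for (int x = -128; x <= 127; ++x) {
      // Signed: x >= imm  <=>  x > imm-1, and x < imm  <=>  x <= imm-1.
      assert((x >= imm) == (x > imm - 1));
      assert((x < imm) == (x <= imm - 1));
      // Unsigned: same identity on the raw bit patterns, except the wrapping
      // case imm == 0, which is folded away before pattern matching.
      uint8_t ux = uint8_t(x), ui = uint8_t(imm), ud = uint8_t(imm - 1);
      if (ui != 0) {
        assert((ux >= ui) == (ux > ud));
        assert((ux < ui) == (ux <= ud));
      }
    }
  }
  return 0;
}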