diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -6165,7 +6165,7 @@
       SDValue Vfirst =
           DAG.getNode(RISCVISD::VFIRST_VL, DL, XLenVT, Vec, Mask, VL);
       return DAG.getSetCC(DL, XLenVT, Vfirst, DAG.getConstant(0, DL, XLenVT),
-                          ISD::SETEQ);
+                          ISD::SETNE);
     }
     if (VecVT.isFixedLengthVector()) {
       unsigned NumElts = VecVT.getVectorNumElements();
diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
--- a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
@@ -29,7 +29,7 @@
 ; RV32-NEXT: vmerge.vvm v8, v9, v8, v0
 ; RV32-NEXT: vmv.x.s a0, v8
 ; RV32-NEXT: vfirst.m a1, v10
-; RV32-NEXT: seqz a1, a1
+; RV32-NEXT: snez a1, a1
 ; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
 ; RV32-NEXT: vmv.v.x v8, a1
 ; RV32-NEXT: vmsne.vi v0, v8, 0
@@ -51,7 +51,7 @@
 ; RV64-NEXT: vmerge.vvm v8, v10, v8, v0
 ; RV64-NEXT: vmv.x.s a0, v8
 ; RV64-NEXT: vfirst.m a1, v12
-; RV64-NEXT: seqz a1, a1
+; RV64-NEXT: snez a1, a1
 ; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
 ; RV64-NEXT: vmv.v.x v8, a1
 ; RV64-NEXT: vmsne.vi v0, v8, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
@@ -217,7 +217,7 @@
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vmseq.vi v8, v8, 0
 ; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %a = load , * %x
   %b = icmp eq %a, zeroinitializer
@@ -232,7 +232,7 @@
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vmseq.vi v8, v8, 0
 ; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %a = load , * %x
   %b = icmp eq %a, zeroinitializer
@@ -247,7 +247,7 @@
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vmseq.vi v8, v8, 0
 ; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %a = load , * %x
   %b = icmp eq %a, zeroinitializer
@@ -262,7 +262,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT: vmseq.vi v8, v8, 0
 ; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %a = load , * %x
   %b = icmp eq %a, zeroinitializer
@@ -277,7 +277,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT: vmseq.vi v10, v8, 0
 ; CHECK-NEXT: vfirst.m a0, v10
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %a = load , * %x
   %b = icmp eq %a, zeroinitializer
@@ -292,7 +292,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT: vmseq.vi v12, v8, 0
 ; CHECK-NEXT: vfirst.m a0, v12
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %a = load , * %x
   %b = icmp eq %a, zeroinitializer
@@ -307,7 +307,7 @@
 ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT: vmseq.vi v16, v8, 0
 ; CHECK-NEXT: vfirst.m a0, v16
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %a = load , * %x
   %b = icmp eq %a, zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll
@@ -20,7 +20,7 @@
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-NEXT: vand.vi v8, v8, 1
 ; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -45,7 +45,7 @@
 ; CHECK-NEXT: vmv.x.s a0, v8
 ; CHECK-NEXT: vslide1down.vx v8, v9, a0
 ; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-NEXT: vand.vi v8, v8, 1
 ; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -82,7 +82,7 @@
 ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 31
 ; RV32-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-UNKNOWN-NEXT: vfirst.m a0, v0
-; RV32-BITS-UNKNOWN-NEXT: seqz a0, a0
+; RV32-BITS-UNKNOWN-NEXT: snez a0, a0
 ; RV32-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
@@ -115,7 +115,7 @@
 ; RV32-BITS-256-NEXT: srli a0, a0, 31
 ; RV32-BITS-256-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-256-NEXT: vfirst.m a0, v0
-; RV32-BITS-256-NEXT: seqz a0, a0
+; RV32-BITS-256-NEXT: snez a0, a0
 ; RV32-BITS-256-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-256-NEXT: vand.vi v8, v8, 1
 ; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
@@ -148,7 +148,7 @@
 ; RV32-BITS-512-NEXT: srli a0, a0, 31
 ; RV32-BITS-512-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-512-NEXT: vfirst.m a0, v0
-; RV32-BITS-512-NEXT: seqz a0, a0
+; RV32-BITS-512-NEXT: snez a0, a0
 ; RV32-BITS-512-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-512-NEXT: vand.vi v8, v8, 1
 ; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
@@ -181,7 +181,7 @@
 ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 63
 ; RV64-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-UNKNOWN-NEXT: vfirst.m a0, v0
-; RV64-BITS-UNKNOWN-NEXT: seqz a0, a0
+; RV64-BITS-UNKNOWN-NEXT: snez a0, a0
 ; RV64-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
@@ -214,7 +214,7 @@
 ; RV64-BITS-256-NEXT: srli a0, a0, 63
 ; RV64-BITS-256-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-256-NEXT: vfirst.m a0, v0
-; RV64-BITS-256-NEXT: seqz a0, a0
+; RV64-BITS-256-NEXT: snez a0, a0
 ; RV64-BITS-256-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-256-NEXT: vand.vi v8, v8, 1
 ; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
@@ -247,7 +247,7 @@
 ; RV64-BITS-512-NEXT: srli a0, a0, 63
 ; RV64-BITS-512-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-512-NEXT: vfirst.m a0, v0
-; RV64-BITS-512-NEXT: seqz a0, a0
+; RV64-BITS-512-NEXT: snez a0, a0
 ; RV64-BITS-512-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-512-NEXT: vand.vi v8, v8, 1
 ; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
@@ -308,7 +308,7 @@
 ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 31
 ; RV32-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-UNKNOWN-NEXT: vfirst.m a0, v0
-; RV32-BITS-UNKNOWN-NEXT: seqz a0, a0
+; RV32-BITS-UNKNOWN-NEXT: snez a0, a0
 ; RV32-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
@@ -365,7 +365,7 @@
 ; RV32-BITS-256-NEXT: srli a0, a0, 31
 ; RV32-BITS-256-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-256-NEXT: vfirst.m a0, v0
-; RV32-BITS-256-NEXT: seqz a0, a0
+; RV32-BITS-256-NEXT: snez a0, a0
 ; RV32-BITS-256-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-256-NEXT: vand.vi v8, v8, 1
 ; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
@@ -422,7 +422,7 @@
 ; RV32-BITS-512-NEXT: srli a0, a0, 31
 ; RV32-BITS-512-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-512-NEXT: vfirst.m a0, v0
-; RV32-BITS-512-NEXT: seqz a0, a0
+; RV32-BITS-512-NEXT: snez a0, a0
 ; RV32-BITS-512-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-512-NEXT: vand.vi v8, v8, 1
 ; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
@@ -479,7 +479,7 @@
 ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 63
 ; RV64-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-UNKNOWN-NEXT: vfirst.m a0, v0
-; RV64-BITS-UNKNOWN-NEXT: seqz a0, a0
+; RV64-BITS-UNKNOWN-NEXT: snez a0, a0
 ; RV64-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
@@ -536,7 +536,7 @@
 ; RV64-BITS-256-NEXT: srli a0, a0, 63
 ; RV64-BITS-256-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-256-NEXT: vfirst.m a0, v0
-; RV64-BITS-256-NEXT: seqz a0, a0
+; RV64-BITS-256-NEXT: snez a0, a0
 ; RV64-BITS-256-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-256-NEXT: vand.vi v8, v8, 1
 ; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
@@ -593,7 +593,7 @@
 ; RV64-BITS-512-NEXT: srli a0, a0, 63
 ; RV64-BITS-512-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-512-NEXT: vfirst.m a0, v0
-; RV64-BITS-512-NEXT: seqz a0, a0
+; RV64-BITS-512-NEXT: snez a0, a0
 ; RV64-BITS-512-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-512-NEXT: vand.vi v8, v8, 1
 ; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
@@ -702,7 +702,7 @@
 ; RV32-BITS-UNKNOWN-NEXT: srli a0, a0, 31
 ; RV32-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-UNKNOWN-NEXT: vfirst.m a0, v0
-; RV32-BITS-UNKNOWN-NEXT: seqz a0, a0
+; RV32-BITS-UNKNOWN-NEXT: snez a0, a0
 ; RV32-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
@@ -807,7 +807,7 @@
 ; RV32-BITS-256-NEXT: srli a0, a0, 31
 ; RV32-BITS-256-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-256-NEXT: vfirst.m a0, v0
-; RV32-BITS-256-NEXT: seqz a0, a0
+; RV32-BITS-256-NEXT: snez a0, a0
 ; RV32-BITS-256-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-256-NEXT: vand.vi v8, v8, 1
 ; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
@@ -912,7 +912,7 @@
 ; RV32-BITS-512-NEXT: srli a0, a0, 31
 ; RV32-BITS-512-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-512-NEXT: vfirst.m a0, v0
-; RV32-BITS-512-NEXT: seqz a0, a0
+; RV32-BITS-512-NEXT: snez a0, a0
 ; RV32-BITS-512-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-512-NEXT: vand.vi v8, v8, 1
 ; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
@@ -1017,7 +1017,7 @@
 ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 63
 ; RV64-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-UNKNOWN-NEXT: vfirst.m a0, v0
-; RV64-BITS-UNKNOWN-NEXT: seqz a0, a0
+; RV64-BITS-UNKNOWN-NEXT: snez a0, a0
 ; RV64-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
@@ -1122,7 +1122,7 @@
 ; RV64-BITS-256-NEXT: srli a0, a0, 63
 ; RV64-BITS-256-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-256-NEXT: vfirst.m a0, v0
-; RV64-BITS-256-NEXT: seqz a0, a0
+; RV64-BITS-256-NEXT: snez a0, a0
 ; RV64-BITS-256-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-256-NEXT: vand.vi v8, v8, 1
 ; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
@@ -1227,7 +1227,7 @@
 ; RV64-BITS-512-NEXT: srli a0, a0, 63
 ; RV64-BITS-512-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-512-NEXT: vfirst.m a0, v0
-; RV64-BITS-512-NEXT: seqz a0, a0
+; RV64-BITS-512-NEXT: snez a0, a0
 ; RV64-BITS-512-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-512-NEXT: vand.vi v8, v8, 1
 ; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
@@ -1434,7 +1434,7 @@
 ; RV32-BITS-UNKNOWN-NEXT: srli a1, a1, 31
 ; RV32-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a1
 ; RV32-BITS-UNKNOWN-NEXT: vfirst.m a0, v0
-; RV32-BITS-UNKNOWN-NEXT: seqz a0, a0
+; RV32-BITS-UNKNOWN-NEXT: snez a0, a0
 ; RV32-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
 ; RV32-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
@@ -1637,7 +1637,7 @@
 ; RV32-BITS-256-NEXT: srli a1, a1, 31
 ; RV32-BITS-256-NEXT: vslide1down.vx v8, v8, a1
 ; RV32-BITS-256-NEXT: vfirst.m a0, v0
-; RV32-BITS-256-NEXT: seqz a0, a0
+; RV32-BITS-256-NEXT: snez a0, a0
 ; RV32-BITS-256-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-256-NEXT: vand.vi v8, v8, 1
 ; RV32-BITS-256-NEXT: vmsne.vi v0, v8, 0
@@ -1840,7 +1840,7 @@
 ; RV32-BITS-512-NEXT: srli a1, a1, 31
 ; RV32-BITS-512-NEXT: vslide1down.vx v8, v8, a1
 ; RV32-BITS-512-NEXT: vfirst.m a0, v0
-; RV32-BITS-512-NEXT: seqz a0, a0
+; RV32-BITS-512-NEXT: snez a0, a0
 ; RV32-BITS-512-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-BITS-512-NEXT: vand.vi v8, v8, 1
 ; RV32-BITS-512-NEXT: vmsne.vi v0, v8, 0
@@ -2040,7 +2040,7 @@
 ; RV64-BITS-UNKNOWN-NEXT: srli a0, a0, 63
 ; RV64-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-UNKNOWN-NEXT: vfirst.m a0, v0
-; RV64-BITS-UNKNOWN-NEXT: seqz a0, a0
+; RV64-BITS-UNKNOWN-NEXT: snez a0, a0
 ; RV64-BITS-UNKNOWN-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-UNKNOWN-NEXT: vand.vi v8, v8, 1
 ; RV64-BITS-UNKNOWN-NEXT: vmsne.vi v0, v8, 0
@@ -2240,7 +2240,7 @@
 ; RV64-BITS-256-NEXT: srli a0, a0, 63
 ; RV64-BITS-256-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-256-NEXT: vfirst.m a0, v0
-; RV64-BITS-256-NEXT: seqz a0, a0
+; RV64-BITS-256-NEXT: snez a0, a0
 ; RV64-BITS-256-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-256-NEXT: vand.vi v8, v8, 1
 ; RV64-BITS-256-NEXT: vmsne.vi v0, v8, 0
@@ -2440,7 +2440,7 @@
 ; RV64-BITS-512-NEXT: srli a0, a0, 63
 ; RV64-BITS-512-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-512-NEXT: vfirst.m a0, v0
-; RV64-BITS-512-NEXT: seqz a0, a0
+; RV64-BITS-512-NEXT: snez a0, a0
 ; RV64-BITS-512-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-BITS-512-NEXT: vand.vi v8, v8, 1
 ; RV64-BITS-512-NEXT: vmsne.vi v0, v8, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
@@ -453,7 +453,7 @@
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vmseq.vi v8, v8, 0
 ; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %a = load <1 x i8>, ptr %x
   %b = icmp eq <1 x i8> %a, zeroinitializer
@@ -468,7 +468,7 @@
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vmseq.vi v8, v8, 0
 ; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %a = load <2 x i8>, ptr %x
   %b = icmp eq <2 x i8> %a, zeroinitializer
@@ -483,7 +483,7 @@
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vmseq.vi v8, v8, 0
 ; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %a = load <4 x i8>, ptr %x
   %b = icmp eq <4 x i8> %a, zeroinitializer
@@ -498,7 +498,7 @@
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vmseq.vi v8, v8, 0
 ; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %a = load <8 x i8>, ptr %x
   %b = icmp eq <8 x i8> %a, zeroinitializer
@@ -513,7 +513,7 @@
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vmseq.vi v8, v8, 0
 ; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %a = load <16 x i8>, ptr %x
   %b = icmp eq <16 x i8> %a, zeroinitializer
@@ -529,7 +529,7 @@
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vmseq.vi v10, v8, 0
 ; CHECK-NEXT: vfirst.m a0, v10
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %a = load <32 x i8>, ptr %x
   %b = icmp eq <32 x i8> %a, zeroinitializer
@@ -545,7 +545,7 @@
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vmseq.vi v12, v8, 0
 ; CHECK-NEXT: vfirst.m a0, v12
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %a = load <64 x i8>, ptr %x
   %b = icmp eq <64 x i8> %a, zeroinitializer
@@ -561,7 +561,7 @@
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vmseq.vi v16, v8, 0
 ; CHECK-NEXT: vfirst.m a0, v16
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %a = load <128 x i8>, ptr %x
   %b = icmp eq <128 x i8> %a, zeroinitializer
@@ -577,7 +577,7 @@
 ; CHECK-NEXT: vle8.v v8, (a0)
 ; CHECK-NEXT: vmseq.vi v16, v8, 0
 ; CHECK-NEXT: vfirst.m a0, v16
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %a = load <256 x i8>, ptr %x
   %b = icmp eq <256 x i8> %a, zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -36,7 +36,7 @@
 ; RV64ZVE32F: # %bb.0:
 ; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: bnez a1, .LBB0_2
+; RV64ZVE32F-NEXT: beqz a1, .LBB0_2
 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT: vle8.v v8, (a0)
@@ -870,7 +870,7 @@
 ; RV64ZVE32F: # %bb.0:
 ; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: bnez a1, .LBB13_2
+; RV64ZVE32F-NEXT: beqz a1, .LBB13_2
 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT: vle16.v v8, (a0)
@@ -2067,7 +2067,7 @@
 ; RV64ZVE32F: # %bb.0:
 ; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: bnez a1, .LBB27_2
+; RV64ZVE32F-NEXT: beqz a1, .LBB27_2
 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT: vle32.v v8, (a0)
@@ -3607,7 +3607,7 @@
 ; RV32ZVE32F: # %bb.0:
 ; RV32ZVE32F-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
 ; RV32ZVE32F-NEXT: vfirst.m a2, v0
-; RV32ZVE32F-NEXT: bnez a2, .LBB42_2
+; RV32ZVE32F-NEXT: beqz a2, .LBB42_2
 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
 ; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8
@@ -3620,7 +3620,7 @@
 ; RV64ZVE32F: # %bb.0:
 ; RV64ZVE32F-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT: vfirst.m a2, v0
-; RV64ZVE32F-NEXT: bnez a2, .LBB42_2
+; RV64ZVE32F-NEXT: beqz a2, .LBB42_2
 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
 ; RV64ZVE32F-NEXT: ld a1, 0(a0)
 ; RV64ZVE32F-NEXT: .LBB42_2: # %else
@@ -7060,7 +7060,7 @@
 ; RV64ZVE32F: # %bb.0:
 ; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: bnez a1, .LBB58_2
+; RV64ZVE32F-NEXT: beqz a1, .LBB58_2
 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT: vle16.v v8, (a0)
@@ -8023,7 +8023,7 @@
 ; RV64ZVE32F: # %bb.0:
 ; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: bnez a1, .LBB68_2
+; RV64ZVE32F-NEXT: beqz a1, .LBB68_2
 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT: vle32.v v8, (a0)
@@ -9437,7 +9437,7 @@
 ; RV32ZVE32F: # %bb.0:
 ; RV32ZVE32F-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
 ; RV32ZVE32F-NEXT: vfirst.m a0, v0
-; RV32ZVE32F-NEXT: bnez a0, .LBB81_2
+; RV32ZVE32F-NEXT: beqz a0, .LBB81_2
 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
 ; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8
@@ -9449,7 +9449,7 @@
 ; RV64ZVE32F: # %bb.0:
 ; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: bnez a1, .LBB81_2
+; RV64ZVE32F-NEXT: beqz a1, .LBB81_2
 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
 ; RV64ZVE32F-NEXT: fld fa0, 0(a0)
 ; RV64ZVE32F-NEXT: .LBB81_2: # %else
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -33,7 +33,7 @@
 ; RV64ZVE32F: # %bb.0:
 ; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: bnez a1, .LBB0_2
+; RV64ZVE32F-NEXT: beqz a1, .LBB0_2
 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT: vse8.v v8, (a0)
@@ -637,7 +637,7 @@
 ; RV64ZVE32F: # %bb.0:
 ; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: bnez a1, .LBB10_2
+; RV64ZVE32F-NEXT: beqz a1, .LBB10_2
 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT: vse16.v v8, (a0)
@@ -1593,7 +1593,7 @@
 ; RV64ZVE32F: # %bb.0:
 ; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: bnez a1, .LBB22_2
+; RV64ZVE32F-NEXT: beqz a1, .LBB22_2
 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT: vse32.v v8, (a0)
@@ -2887,7 +2887,7 @@
 ; RV32ZVE32F: # %bb.0:
 ; RV32ZVE32F-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
 ; RV32ZVE32F-NEXT: vfirst.m a2, v0
-; RV32ZVE32F-NEXT: bnez a2, .LBB36_2
+; RV32ZVE32F-NEXT: beqz a2, .LBB36_2
 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.store
 ; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
 ; RV32ZVE32F-NEXT: vmv.x.s a2, v8
@@ -2900,7 +2900,7 @@
 ; RV64ZVE32F: # %bb.0:
 ; RV64ZVE32F-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT: vfirst.m a2, v0
-; RV64ZVE32F-NEXT: bnez a2, .LBB36_2
+; RV64ZVE32F-NEXT: beqz a2, .LBB36_2
 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
 ; RV64ZVE32F-NEXT: sd a0, 0(a1)
 ; RV64ZVE32F-NEXT: .LBB36_2: # %else
@@ -5974,7 +5974,7 @@
 ; RV64ZVE32F: # %bb.0:
 ; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: bnez a1, .LBB52_2
+; RV64ZVE32F-NEXT: beqz a1, .LBB52_2
 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
 ; RV64ZVE32F-NEXT: vse16.v v8, (a0)
@@ -6820,7 +6820,7 @@
 ; RV64ZVE32F: # %bb.0:
 ; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: bnez a1, .LBB62_2
+; RV64ZVE32F-NEXT: beqz a1, .LBB62_2
 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
 ; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
 ; RV64ZVE32F-NEXT: vse32.v v8, (a0)
@@ -8060,7 +8060,7 @@
 ; RV32ZVE32F: # %bb.0:
 ; RV32ZVE32F-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
 ; RV32ZVE32F-NEXT: vfirst.m a0, v0
-; RV32ZVE32F-NEXT: bnez a0, .LBB75_2
+; RV32ZVE32F-NEXT: beqz a0, .LBB75_2
 ; RV32ZVE32F-NEXT: # %bb.1: # %cond.store
 ; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
 ; RV32ZVE32F-NEXT: vmv.x.s a0, v8
@@ -8072,7 +8072,7 @@
 ; RV64ZVE32F: # %bb.0:
 ; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: bnez a1, .LBB75_2
+; RV64ZVE32F-NEXT: beqz a1, .LBB75_2
 ; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
 ; RV64ZVE32F-NEXT: fsd fa0, 0(a0)
 ; RV64ZVE32F-NEXT: .LBB75_2: # %else
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-store.ll
@@ -209,7 +209,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT: vfirst.m a1, v0
-; CHECK-NEXT: seqz a1, a1
+; CHECK-NEXT: snez a1, a1
 ; CHECK-NEXT: vmv.x.s a2, v0
 ; CHECK-NEXT: andi a3, a2, 2
 ; CHECK-NEXT: or a1, a1, a3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
@@ -11,7 +11,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %red = call i1 @llvm.vector.reduce.or.v1i1(<1 x i1> %v)
   ret i1 %red
@@ -24,7 +24,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %red = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %v)
   ret i1 %red
@@ -37,7 +37,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %red = call i1 @llvm.vector.reduce.and.v1i1(<1 x i1> %v)
   ret i1 %red
@@ -50,7 +50,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %red = call i1 @llvm.vector.reduce.umax.v1i1(<1 x i1> %v)
   ret i1 %red
@@ -63,7 +63,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %red = call i1 @llvm.vector.reduce.smax.v1i1(<1 x i1> %v)
   ret i1 %red
@@ -76,7 +76,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %red = call i1 @llvm.vector.reduce.umin.v1i1(<1 x i1> %v)
   ret i1 %red
@@ -89,7 +89,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %red = call i1 @llvm.vector.reduce.smin.v1i1(<1 x i1> %v)
   ret i1 %red
@@ -806,7 +806,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: seqz a0, a0
+; CHECK-NEXT: snez a0, a0
 ; CHECK-NEXT: ret
   %red = call i1 @llvm.vector.reduce.add.v1i1(<1 x i1> %v)
   ret i1 %red
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
@@ -9,7 +9,7 @@
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; RV32-NEXT: vfirst.m a0, v0
-; RV32-NEXT: seqz a0, a0
+; RV32-NEXT: snez a0, a0
 ; RV32-NEXT: vslide1down.vx v8, v8, a0
 ; RV32-NEXT: vsetivli zero, 0, e16, mf4, ta, ma
 ; RV32-NEXT: vmv.x.s a0, v0
@@ -39,7 +39,7 @@
 ; RV32-NEXT: vslidedown.vi v9, v0, 2
 ; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; RV32-NEXT: vfirst.m a1, v9
-; RV32-NEXT: seqz a1, a1
+; RV32-NEXT: snez a1, a1
 ; RV32-NEXT: vslide1down.vx v8, v8, a1
 ; RV32-NEXT: vsetivli zero, 0, e16, mf4, ta, ma
 ; RV32-NEXT: vmv.x.s a1, v9
@@ -123,7 +123,7 @@
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; RV64-NEXT: vfirst.m a0, v0
-; RV64-NEXT: seqz a0, a0
+; RV64-NEXT: snez a0, a0
 ; RV64-NEXT: vslide1down.vx v8, v8, a0
 ; RV64-NEXT: vsetivli zero, 0, e16, mf4, ta, ma
 ; RV64-NEXT: vmv.x.s a0, v0
@@ -153,7 +153,7 @@
 ; RV64-NEXT: vslidedown.vi v9, v0, 2
 ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; RV64-NEXT: vfirst.m a1, v9
-; RV64-NEXT: seqz a1, a1
+; RV64-NEXT: snez a1, a1
 ; RV64-NEXT: vslide1down.vx v8, v8, a1
 ; RV64-NEXT: vsetivli zero, 0, e16, mf4, ta, ma
 ; RV64-NEXT: vmv.x.s a1, v9
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
@@ -9,13 +9,13 @@
 ; RV32: # %bb.0:
 ; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; RV32-NEXT: vfirst.m a0, v0
-; RV32-NEXT: seqz a0, a0
+; RV32-NEXT: snez a0, a0
 ; RV32-NEXT: li a2, 32
 ; RV32-NEXT: vsetvli zero, a2, e8, m2, ta, ma
 ; RV32-NEXT: vslide1down.vx v10, v8, a0
 ; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; RV32-NEXT: vfirst.m a0, v8
-; RV32-NEXT: seqz a0, a0
+; RV32-NEXT: snez a0, a0
 ; RV32-NEXT: vsetvli zero, a2, e8, m2, ta, ma
 ; RV32-NEXT: vslide1down.vx v10, v10, a0
 ; RV32-NEXT: vsetivli zero, 0, e16, mf4, ta, ma
@@ -122,13 +122,13 @@
 ; RV64: # %bb.0:
 ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; RV64-NEXT: vfirst.m a0, v0
-; RV64-NEXT: seqz a0, a0
+; RV64-NEXT: snez a0, a0
 ; RV64-NEXT: li a2, 32
 ; RV64-NEXT: vsetvli zero, a2, e8, m2, ta, ma
 ; RV64-NEXT: vslide1down.vx v10, v8, a0
 ; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
 ; RV64-NEXT: vfirst.m a0, v8
-; RV64-NEXT: seqz a0, a0
+; RV64-NEXT: snez a0, a0
 ; RV64-NEXT: vsetvli zero, a2, e8, m2, ta, ma
 ; RV64-NEXT: vslide1down.vx v10, v10, a0
 ; RV64-NEXT: vsetivli zero, 0, e16, mf4, ta, ma
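
For reference, the IR shape these tests exercise (extracting element 0 of an i1 compare result, the pattern the RISCVISelLowering.cpp hunk above lowers through vfirst.m and the setcc against 0) looks roughly like the sketch below. Only the load and icmp lines appear verbatim in the hunk context; the function name and the extractelement/ret lines are assumed for illustration and are not part of the patch.

; Hypothetical reduced form of the v1i8 case in fixed-vectors-extract-i1.ll.
; The function name and the extractelement/ret lines are assumed, not taken
; from the hunks above.
define i1 @extractelt_v1i8_idx0(ptr %x) {
  %a = load <1 x i8>, ptr %x
  %b = icmp eq <1 x i8> %a, zeroinitializer
  %c = extractelement <1 x i1> %b, i64 0
  ret i1 %c
}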