diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -641,7 +641,7 @@
   let VLMul = m.value;
 }
 
-class VPseudoUSLoadNoMask<VReg RetClass, int EEW> :
+class VPseudoUSLoadNoMask<VReg RetClass, int EEW, int DummyMask = 1> :
       Pseudo<(outs RetClass:$rd),
              (ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
       RISCVVPseudo,
@@ -651,7 +651,7 @@
   let hasSideEffects = 0;
   let HasVLOp = 1;
   let HasSEWOp = 1;
-  let HasDummyMask = 1;
+  let HasDummyMask = DummyMask;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -794,7 +794,7 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoUSStoreNoMask<VReg StClass, int EEW>:
+class VPseudoUSStoreNoMask<VReg StClass, int EEW, int DummyMask = 1>:
       Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
       RISCVVPseudo,
@@ -804,7 +804,7 @@
   let hasSideEffects = 0;
   let HasVLOp = 1;
   let HasSEWOp = 1;
-  let HasDummyMask = 1;
+  let HasDummyMask = DummyMask;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -1035,7 +1035,8 @@
 class VPseudoBinaryNoMask<VReg RetClass,
                           RegisterClass Op1Class,
                           DAGOperand Op2Class,
-                          string Constraint> :
+                          string Constraint,
+                          int DummyMask = 1> :
         Pseudo<(outs RetClass:$rd),
                (ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
         RISCVVPseudo {
@@ -1045,7 +1046,7 @@
   let Constraints = Constraint;
   let HasVLOp = 1;
   let HasSEWOp = 1;
-  let HasDummyMask = 1;
+  let HasDummyMask = DummyMask;
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
@@ -1544,7 +1545,8 @@
 multiclass VPseudoLoadMask {
   foreach mti = AllMasks in {
     let VLMul = mti.LMul.value in {
-      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1>;
+      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1,
+                                               /*DummyMask*/0>;
     }
   }
 }
@@ -1616,7 +1618,7 @@
 multiclass VPseudoStoreMask {
   foreach mti = AllMasks in {
     let VLMul = mti.LMul.value in {
-      def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, /*EEW*/1>;
+      def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, /*EEW*/1, /*DummyMask*/0>;
     }
   }
 }
@@ -1866,7 +1868,7 @@
 multiclass VPseudoVALU_MM {
   foreach m = MxList in
     let VLMul = m.value in {
-      def "_MM_" # m.MX : VPseudoBinaryNoMask<VR, VR, VR, "">,
+      def "_MM_" # m.MX : VPseudoBinaryNoMask<VR, VR, VR, "", /*DummyMask*/0>,
                           Sched<[WriteVMALUV, ReadVMALUV, ReadVMALUV]>;
     }
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll
@@ -256,7 +256,7 @@
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vle16.v v12, (a1)
 ; CHECK-NEXT:    vmflt.vv v16, v12, v8
-; CHECK-NEXT:    vmnand.mm v8, v16, v16
+; CHECK-NEXT:    vmnot.m v8, v16
 ; CHECK-NEXT:    vsm.v v8, (a2)
 ; CHECK-NEXT:    ret
   %a = load <32 x half>, <32 x half>* %x
@@ -290,7 +290,7 @@
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vle32.v v12, (a1)
 ; CHECK-NEXT:    vmflt.vv v16, v8, v12
-; CHECK-NEXT:    vmnand.mm v8, v16, v16
+; CHECK-NEXT:    vmnot.m v8, v16
 ; CHECK-NEXT:    vsm.v v8, (a2)
 ; CHECK-NEXT:    ret
   %a = load <16 x float>, <16 x float>* %x
@@ -323,7 +323,7 @@
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vle64.v v12, (a1)
 ; CHECK-NEXT:    vmfle.vv v16, v12, v8
-; CHECK-NEXT:    vmnand.mm v8, v16, v16
+; CHECK-NEXT:    vmnot.m v8, v16
 ; CHECK-NEXT:    vsm.v v8, (a2)
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
@@ -357,7 +357,7 @@
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vle16.v v16, (a1)
 ; CHECK-NEXT:    vmfle.vv v24, v8, v16
-; CHECK-NEXT:    vmnand.mm v8, v24, v24
+; CHECK-NEXT:    vmnot.m v8, v24
 ; CHECK-NEXT:    vsm.v v8, (a2)
 ; CHECK-NEXT:    ret
   %a = load <64 x half>, <64 x half>* %x
@@ -761,7 +761,7 @@
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vmfgt.vf v12, v8, fa0
-; CHECK-NEXT:    vmnand.mm v8, v12, v12
+; CHECK-NEXT:    vmnot.m v8, v12
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <32 x half>, <32 x half>* %x
@@ -795,7 +795,7 @@
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vmflt.vf v12, v8, fa0
-; CHECK-NEXT:    vmnand.mm v8, v12, v12
+; CHECK-NEXT:    vmnot.m v8, v12
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x float>, <16 x float>* %x
@@ -828,7 +828,7 @@
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vmfge.vf v12, v8, fa0
-; CHECK-NEXT:    vmnand.mm v8, v12, v12
+; CHECK-NEXT:    vmnot.m v8, v12
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
@@ -862,7 +862,7 @@
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vmfle.vf v16, v8, fa0
-; CHECK-NEXT:    vmnand.mm v8, v16, v16
+; CHECK-NEXT:    vmnot.m v8, v16
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <64 x half>, <64 x half>* %x
@@ -1269,7 +1269,7 @@
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vmflt.vf v12, v8, fa0
-; CHECK-NEXT:    vmnand.mm v8, v12, v12
+; CHECK-NEXT:    vmnot.m v8, v12
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <32 x half>, <32 x half>* %x
@@ -1303,7 +1303,7 @@
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0)
 ; CHECK-NEXT:    vmfgt.vf v12, v8, fa0
-; CHECK-NEXT:    vmnand.mm v8, v12, v12
+; CHECK-NEXT:    vmnot.m v8, v12
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <16 x float>, <16 x float>* %x
@@ -1336,7 +1336,7 @@
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vmfle.vf v12, v8, fa0
-; CHECK-NEXT:    vmnand.mm v8, v12, v12
+; CHECK-NEXT:    vmnot.m v8, v12
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <8 x double>, <8 x double>* %x
@@ -1370,7 +1370,7 @@
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vmfge.vf v16, v8, fa0
-; CHECK-NEXT:    vmnand.mm v8, v16, v16
+; CHECK-NEXT:    vmnot.m v8, v16
 ; CHECK-NEXT:    vsm.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <64 x half>, <64 x half>* %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll
@@ -59,7 +59,7 @@
 ; CHECK-NEXT:    li a1, 64
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vlm.v v8, (a0)
-; CHECK-NEXT:    vmnand.mm v8, v8, v8
+; CHECK-NEXT:    vmnot.m v8, v8
 ; CHECK-NEXT:    vsm.v v8, (a0)
 ; CHECK-NEXT:    ret
   %a = load <64 x i1>, <64 x i1>* %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
@@ -10,7 +10,7 @@
 ; CHECK-LABEL: vpreduce_and_v1i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v9, v0, v0
+; CHECK-NEXT:    vmnot.m v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vcpop.m a1, v9, v0.t
 ; CHECK-NEXT:    seqz a1, a1
@@ -62,7 +62,7 @@
 ; CHECK-LABEL: vpreduce_and_v2i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v9, v0, v0
+; CHECK-NEXT:    vmnot.m v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vcpop.m a1, v9, v0.t
 ; CHECK-NEXT:    seqz a1, a1
@@ -114,7 +114,7 @@
 ; CHECK-LABEL: vpreduce_and_v4i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vmnand.mm v9, v0, v0
+; CHECK-NEXT:    vmnot.m v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vcpop.m a1, v9, v0.t
 ; CHECK-NEXT:    seqz a1, a1
@@ -166,7 +166,7 @@
 ; CHECK-LABEL: vpreduce_and_v8i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v9, v0, v0
+; CHECK-NEXT:    vmnot.m v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vcpop.m a1, v9, v0.t
 ; CHECK-NEXT:    seqz a1, a1
@@ -218,7 +218,7 @@
 ; CHECK-LABEL: vpreduce_and_v10i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v9, v0, v0
+; CHECK-NEXT:    vmnot.m v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vcpop.m a1, v9, v0.t
 ; CHECK-NEXT:    seqz a1, a1
@@ -235,7 +235,7 @@
 ; CHECK-LABEL: vpreduce_and_v16i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v9, v0, v0
+; CHECK-NEXT:    vmnot.m v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vcpop.m a1, v9, v0.t
 ; CHECK-NEXT:    seqz a1, a1
@@ -259,7 +259,7 @@
 ; CHECK-NEXT:    mv a3, a2
 ; CHECK-NEXT:  .LBB14_2:
 ; CHECK-NEXT:    vsetvli zero, a3, e8, m8, ta, mu
-; CHECK-NEXT:    vmnand.mm v8, v8, v8
+; CHECK-NEXT:    vmnot.m v8, v8
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    vcpop.m a2, v8, v0.t
 ; CHECK-NEXT:    li a3, 128
@@ -269,7 +269,7 @@
 ; CHECK-NEXT:    li a1, 128
 ; CHECK-NEXT:  .LBB14_4:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vmnand.mm v8, v11, v11
+; CHECK-NEXT:    vmnot.m v8, v11
 ; CHECK-NEXT:    vmv1r.v v0, v9
 ; CHECK-NEXT:    vcpop.m a1, v8, v0.t
 ; CHECK-NEXT:    seqz a1, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll
@@ -315,7 +315,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmfle.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmfle.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
@@ -341,7 +341,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmfge.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
@@ -355,7 +355,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmflt.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"uge", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
@@ -367,7 +367,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmflt.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
@@ -381,7 +381,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmfgt.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
@@ -395,7 +395,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmfle.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ult", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
@@ -407,7 +407,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmfge.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmfle.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
@@ -435,7 +435,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmflt.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ule", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
@@ -447,7 +447,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmfgt.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
@@ -461,7 +461,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; CHECK-NEXT:    vmflt.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x half> poison, half %b, i32 0
   %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
@@ -876,7 +876,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmfle.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
@@ -888,7 +888,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmfle.vf v12, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
@@ -902,7 +902,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmfge.vf v12, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
@@ -916,7 +916,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmflt.vv v16, v8, v12, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"uge", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
@@ -928,7 +928,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmflt.vf v12, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
@@ -942,7 +942,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmfgt.vf v12, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
@@ -956,7 +956,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmfle.vv v16, v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ult", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
@@ -968,7 +968,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmfge.vf v12, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
@@ -982,7 +982,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmfle.vf v12, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
@@ -996,7 +996,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmflt.vv v16, v12, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ule", <8 x i1> %m, i32 %evl)
   ret <8 x i1> %v
@@ -1008,7 +1008,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmfgt.vf v12, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
@@ -1022,7 +1022,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
 ; CHECK-NEXT:    vmflt.vf v12, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <8 x double> poison, double %b, i32 0
   %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vreductions-mask.ll
@@ -86,7 +86,7 @@
 ; CHECK-LABEL: vreduce_and_v2i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v8, v0, v0
+; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
 ; CHECK-NEXT:    seqz a0, a0
 ; CHECK-NEXT:    neg a0, a0
@@ -129,7 +129,7 @@
 ; CHECK-LABEL: vreduce_and_v4i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
-; CHECK-NEXT:    vmnand.mm v8, v0, v0
+; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
 ; CHECK-NEXT:    seqz a0, a0
 ; CHECK-NEXT:    neg a0, a0
@@ -172,7 +172,7 @@
 ; CHECK-LABEL: vreduce_and_v8i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v8, v0, v0
+; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
 ; CHECK-NEXT:    seqz a0, a0
 ; CHECK-NEXT:    neg a0, a0
@@ -215,7 +215,7 @@
 ; CHECK-LABEL: vreduce_and_v16i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v8, v0, v0
+; CHECK-NEXT:    vmnot.m v8, v0
 ; CHECK-NEXT:    vcpop.m a0, v8
 ; CHECK-NEXT:    seqz a0, a0
 ; CHECK-NEXT:    neg a0, a0
@@ -288,7 +288,7 @@
 ; LMULMAX8:       # %bb.0:
 ; LMULMAX8-NEXT:    li a0, 32
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; LMULMAX8-NEXT:    vmnand.mm v8, v0, v0
+; LMULMAX8-NEXT:    vmnot.m v8, v0
 ; LMULMAX8-NEXT:    vcpop.m a0, v8
 ; LMULMAX8-NEXT:    seqz a0, a0
 ; LMULMAX8-NEXT:    neg a0, a0
@@ -367,7 +367,7 @@
 ; LMULMAX8:       # %bb.0:
 ; LMULMAX8-NEXT:    li a0, 64
 ; LMULMAX8-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; LMULMAX8-NEXT:    vmnand.mm v8, v0, v0
+; LMULMAX8-NEXT:    vmnot.m v8, v0
 ; LMULMAX8-NEXT:    vcpop.m a0, v8
 ; LMULMAX8-NEXT:    seqz a0, a0
 ; LMULMAX8-NEXT:    neg a0, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
@@ -315,7 +315,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmfle.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
@@ -327,7 +327,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmfle.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@@ -341,7 +341,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmfge.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@@ -355,7 +355,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmflt.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"uge", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
@@ -367,7 +367,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmflt.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@@ -381,7 +381,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmfgt.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@@ -395,7 +395,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmfle.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"ult", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
@@ -407,7 +407,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmfge.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@@ -421,7 +421,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmfle.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@@ -435,7 +435,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmflt.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"ule", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
@@ -447,7 +447,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmfgt.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@@ -461,7 +461,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmflt.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
@@ -876,7 +876,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmfle.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
@@ -888,7 +888,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmfle.vf v10, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -902,7 +902,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmfge.vf v10, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -916,7 +916,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmflt.vv v12, v8, v10, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"uge", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
@@ -928,7 +928,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmflt.vf v10, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -942,7 +942,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmfgt.vf v10, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -956,7 +956,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmfle.vv v12, v10, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"ult", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
@@ -968,7 +968,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmfge.vf v10, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -982,7 +982,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmfle.vf v10, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -996,7 +996,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmflt.vv v12, v10, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"ule", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
@@ -1008,7 +1008,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmfgt.vf v10, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -1022,7 +1022,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vmflt.vf v10, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -1425,7 +1425,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmfle.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
@@ -1437,7 +1437,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmfle.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
@@ -1451,7 +1451,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmfge.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
@@ -1465,7 +1465,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmflt.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"uge", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
@@ -1477,7 +1477,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmflt.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
@@ -1491,7 +1491,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmfgt.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
@@ -1505,7 +1505,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmfle.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"ult", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
@@ -1517,7 +1517,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmfge.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
@@ -1531,7 +1531,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmfle.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
@@ -1545,7 +1545,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmflt.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"ule", <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i1> %v
@@ -1557,7 +1557,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmfgt.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
@@ -1571,7 +1571,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vmflt.vf v8, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
@@ -1986,7 +1986,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmfle.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v24, v24
+; CHECK-NEXT:    vmnot.m v0, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
@@ -1998,7 +1998,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmfle.vf v16, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@@ -2012,7 +2012,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmfge.vf v16, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@@ -2026,7 +2026,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmflt.vv v24, v8, v16, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v24, v24
+; CHECK-NEXT:    vmnot.m v0, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"uge", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
@@ -2038,7 +2038,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmflt.vf v16, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@@ -2052,7 +2052,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmfgt.vf v16, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@@ -2066,7 +2066,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmfle.vv v24, v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v24, v24
+; CHECK-NEXT:    vmnot.m v0, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"ult", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
@@ -2078,7 +2078,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmfge.vf v16, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@@ -2092,7 +2092,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmfle.vf v16, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@@ -2106,7 +2106,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmflt.vv v24, v16, v8, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v24, v24
+; CHECK-NEXT:    vmnot.m v0, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"ule", <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i1> %v
@@ -2118,7 +2118,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmfgt.vf v16, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@@ -2132,7 +2132,7 @@
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
 ; CHECK-NEXT:    vmflt.vf v16, v8, fa0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp.ll
@@ -485,7 +485,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmfle.vv v12, v8, v10
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %vc = fcmp ugt <vscale x 8 x half> %va, %vb
   ret <vscale x 8 x i1> %vc
@@ -496,7 +496,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmfle.vf v10, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -509,7 +509,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmfge.vf v10, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -544,7 +544,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmflt.vv v12, v8, v10
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %vc = fcmp uge <vscale x 8 x half> %va, %vb
   ret <vscale x 8 x i1> %vc
@@ -555,7 +555,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmflt.vf v10, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -568,7 +568,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmfgt.vf v10, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -603,7 +603,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmfle.vv v12, v10, v8
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %vc = fcmp ult <vscale x 8 x half> %va, %vb
   ret <vscale x 8 x i1> %vc
@@ -614,7 +614,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmfge.vf v10, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -627,7 +627,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmfle.vf v10, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -662,7 +662,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmflt.vv v12, v10, v8
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %vc = fcmp ule <vscale x 8 x half> %va, %vb
   ret <vscale x 8 x i1> %vc
@@ -673,7 +673,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmfgt.vf v10, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -686,7 +686,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; CHECK-NEXT:    vmflt.vf v10, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -1319,7 +1319,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmfle.vv v16, v8, v12
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %vc = fcmp ugt <vscale x 8 x float> %va, %vb
   ret <vscale x 8 x i1> %vc
@@ -1330,7 +1330,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmfle.vf v12, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
@@ -1343,7 +1343,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmfge.vf v12, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
@@ -1378,7 +1378,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmflt.vv v16, v8, v12
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %vc = fcmp uge <vscale x 8 x float> %va, %vb
   ret <vscale x 8 x i1> %vc
@@ -1389,7 +1389,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmflt.vf v12, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
@@ -1402,7 +1402,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmfgt.vf v12, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
@@ -1437,7 +1437,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmfle.vv v16, v12, v8
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %vc = fcmp ult <vscale x 8 x float> %va, %vb
   ret <vscale x 8 x i1> %vc
@@ -1448,7 +1448,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmfge.vf v12, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
@@ -1461,7 +1461,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmfle.vf v12, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
@@ -1496,7 +1496,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmflt.vv v16, v12, v8
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %vc = fcmp ule <vscale x 8 x float> %va, %vb
   ret <vscale x 8 x i1> %vc
@@ -1507,7 +1507,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmfgt.vf v12, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
@@ -1520,7 +1520,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; CHECK-NEXT:    vmflt.vf v12, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x float> poison, float %b, i32 0
   %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
@@ -2153,7 +2153,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmfle.vv v24, v8, v16
-; CHECK-NEXT:    vmnand.mm v0, v24, v24
+; CHECK-NEXT:    vmnot.m v0, v24
 ; CHECK-NEXT:    ret
   %vc = fcmp ugt <vscale x 8 x double> %va, %vb
   ret <vscale x 8 x i1> %vc
@@ -2164,7 +2164,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmfle.vf v16, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@@ -2177,7 +2177,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmfge.vf v16, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@@ -2212,7 +2212,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmflt.vv v24, v8, v16
-; CHECK-NEXT:    vmnand.mm v0, v24, v24
+; CHECK-NEXT:    vmnot.m v0, v24
 ; CHECK-NEXT:    ret
   %vc = fcmp uge <vscale x 8 x double> %va, %vb
   ret <vscale x 8 x i1> %vc
@@ -2223,7 +2223,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmflt.vf v16, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@@ -2236,7 +2236,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmfgt.vf v16, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@@ -2271,7 +2271,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmfle.vv v24, v16, v8
-; CHECK-NEXT:    vmnand.mm v0, v24, v24
+; CHECK-NEXT:    vmnot.m v0, v24
 ; CHECK-NEXT:    ret
   %vc = fcmp ult <vscale x 8 x double> %va, %vb
   ret <vscale x 8 x i1> %vc
@@ -2282,7 +2282,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmfge.vf v16, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@@ -2295,7 +2295,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmfle.vf v16, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@@ -2330,7 +2330,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmflt.vv v24, v16, v8
-; CHECK-NEXT:    vmnand.mm v0, v24, v24
+; CHECK-NEXT:    vmnot.m v0, v24
 ; CHECK-NEXT:    ret
   %vc = fcmp ule <vscale x 8 x double> %va, %vb
   ret <vscale x 8 x i1> %vc
@@ -2341,7 +2341,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmfgt.vf v16, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@@ -2354,7 +2354,7 @@
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmflt.vf v16, v8, fa0
-; CHECK-NEXT:    vmnand.mm v0, v16, v16
+; CHECK-NEXT:    vmnot.m v0, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x double> poison, double %b, i32 0
   %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
@@ -995,7 +995,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
@@ -1043,7 +1043,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
@@ -1091,7 +1091,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vmslt.vx v10, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
@@ -1187,7 +1187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vmslt.vx v12, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
@@ -1235,7 +1235,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
@@ -1283,7 +1283,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
@@ -1331,7 +1331,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vmslt.vx v10, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
@@ -1427,7 +1427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vmslt.vx v12, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
@@ -1475,7 +1475,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
@@ -1523,7 +1523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
@@ -1571,7 +1571,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vmslt.vx v10, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
@@ -1619,7 +1619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vmslt.vx v12, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
@@ -995,7 +995,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
@@ -1043,7 +1043,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
@@ -1091,7 +1091,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vmslt.vx v10, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
@@ -1187,7 +1187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vmslt.vx v12, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
@@ -1235,7 +1235,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
@@ -1283,7 +1283,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
@@ -1331,7 +1331,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vmslt.vx v10, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
@@ -1427,7 +1427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vmslt.vx v12, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
@@ -1475,7 +1475,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
@@ -1523,7 +1523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
@@ -1571,7 +1571,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vmslt.vx v10, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
@@ -1619,7 +1619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vmslt.vx v12, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
@@ -1667,7 +1667,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmslt.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
@@ -1715,7 +1715,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vmslt.vx v10, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
@@ -1763,7 +1763,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vmslt.vx v12, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
@@ -995,7 +995,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
@@ -1043,7 +1043,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
@@ -1091,7 +1091,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v10, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
@@ -1187,7 +1187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v12, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
@@ -1235,7 +1235,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
@@ -1283,7 +1283,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
@@ -1331,7 +1331,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v10, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
@@ -1427,7 +1427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v12, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
@@ -1475,7 +1475,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
@@ -1523,7 +1523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
@@ -1571,7 +1571,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v10, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
@@ -1619,7 +1619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v12, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
@@ -947,7 +947,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
@@ -995,7 +995,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
@@ -1043,7 +1043,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
@@ -1091,7 +1091,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
@@ -1139,7 +1139,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v10, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
@@ -1187,7 +1187,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v12, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
@@ -1235,7 +1235,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
@@ -1283,7 +1283,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
@@ -1331,7 +1331,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
@@ -1379,7 +1379,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v10, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
@@ -1427,7 +1427,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v12, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
@@ -1475,7 +1475,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
@@ -1523,7 +1523,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
@@ -1571,7 +1571,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v10, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
@@ -1619,7 +1619,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v12, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
@@ -1667,7 +1667,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v8, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v8, v8
+; CHECK-NEXT:    vmnot.m v0, v8
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
@@ -1715,7 +1715,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v10, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v10, v10
+; CHECK-NEXT:    vmnot.m v0, v10
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
@@ -1763,7 +1763,7 @@
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vmsltu.vx v12, v8, a0
-; CHECK-NEXT:    vmnand.mm v0, v12, v12
+; CHECK-NEXT:    vmnot.m v0, v12
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
@@ -8,7 +8,7 @@
 ; CHECK-LABEL: vpreduce_and_nxv1i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vmnand.mm v9, v0, v0
+; CHECK-NEXT:    vmnot.m v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vcpop.m a1, v9, v0.t
 ; CHECK-NEXT:    seqz a1, a1
@@ -60,7 +60,7 @@
 ; CHECK-LABEL: vpreduce_and_nxv2i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vmnand.mm v9, v0, v0
+; CHECK-NEXT:    vmnot.m v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vcpop.m a1, v9, v0.t
 ; CHECK-NEXT:    seqz a1, a1
@@ -112,7 +112,7 @@
 ; CHECK-LABEL: vpreduce_and_nxv4i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vmnand.mm v9, v0, v0
+; CHECK-NEXT:    vmnot.m v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vcpop.m a1, v9, v0.t
 ; CHECK-NEXT:    seqz a1, a1
@@ -164,7 +164,7 @@
 ; CHECK-LABEL: vpreduce_and_nxv8i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vmnand.mm v9, v0, v0
+; CHECK-NEXT:    vmnot.m v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vcpop.m a1, v9, v0.t
 ; CHECK-NEXT:    seqz a1, a1
@@ -216,7 +216,7 @@
 ; CHECK-LABEL: vpreduce_and_nxv16i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vmnand.mm v9, v0, v0
+; CHECK-NEXT:    vmnot.m v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vcpop.m a1, v9, v0.t
 ; CHECK-NEXT:    seqz a1, a1
@@ -268,7 +268,7 @@
 ; CHECK-LABEL: vpreduce_and_nxv32i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vmnand.mm v9, v0, v0
+; CHECK-NEXT:    vmnot.m v9, v0
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vcpop.m a1, v9, v0.t
 ; CHECK-NEXT:
seqz a1, a1 @@ -338,7 +338,7 @@ ; CHECK-LABEL: vpreduce_and_nxv64i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu -; CHECK-NEXT: vmnand.mm v9, v0, v0 +; CHECK-NEXT: vmnot.m v9, v0 ; CHECK-NEXT: vmv1r.v v0, v8 ; CHECK-NEXT: vcpop.m a1, v9, v0.t ; CHECK-NEXT: seqz a1, a1 diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll --- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask.ll @@ -36,7 +36,7 @@ ; CHECK-LABEL: vreduce_and_nxv1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu -; CHECK-NEXT: vmnand.mm v8, v0, v0 +; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: neg a0, a0 @@ -79,7 +79,7 @@ ; CHECK-LABEL: vreduce_and_nxv2i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu -; CHECK-NEXT: vmnand.mm v8, v0, v0 +; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: neg a0, a0 @@ -122,7 +122,7 @@ ; CHECK-LABEL: vreduce_and_nxv4i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu -; CHECK-NEXT: vmnand.mm v8, v0, v0 +; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: neg a0, a0 @@ -165,7 +165,7 @@ ; CHECK-LABEL: vreduce_and_nxv8i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu -; CHECK-NEXT: vmnand.mm v8, v0, v0 +; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: neg a0, a0 @@ -208,7 +208,7 @@ ; CHECK-LABEL: vreduce_and_nxv16i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu -; CHECK-NEXT: vmnand.mm v8, v0, v0 +; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: neg a0, a0 @@ -251,7 +251,7 @@ ; CHECK-LABEL: vreduce_and_nxv32i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu -; CHECK-NEXT: vmnand.mm v8, v0, v0 +; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: neg a0, a0 @@ -294,7 +294,7 @@ ; CHECK-LABEL: vreduce_and_nxv64i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu -; CHECK-NEXT: vmnand.mm v8, v0, v0 +; CHECK-NEXT: vmnot.m v8, v0 ; CHECK-NEXT: vcpop.m a0, v8 ; CHECK-NEXT: seqz a0, a0 ; CHECK-NEXT: neg a0, a0