diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -596,60 +596,80 @@
           IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
       VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8_MASK
                                    : RISCV::PseudoVMSLT_VX_MF8_MASK;
-      VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
-      VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8;
       break;
     case RISCVII::VLMUL::LMUL_F4:
       VMSLTOpcode =
           IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
       VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4_MASK
                                    : RISCV::PseudoVMSLT_VX_MF4_MASK;
-      VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
-      VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4;
       break;
     case RISCVII::VLMUL::LMUL_F2:
       VMSLTOpcode =
           IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
       VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2_MASK
                                    : RISCV::PseudoVMSLT_VX_MF2_MASK;
-      VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
-      VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2;
       break;
     case RISCVII::VLMUL::LMUL_1:
       VMSLTOpcode =
           IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
       VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1_MASK
                                    : RISCV::PseudoVMSLT_VX_M1_MASK;
-      VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
-      VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1;
       break;
     case RISCVII::VLMUL::LMUL_2:
       VMSLTOpcode =
           IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
       VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2_MASK
                                    : RISCV::PseudoVMSLT_VX_M2_MASK;
-      VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
-      VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2;
       break;
     case RISCVII::VLMUL::LMUL_4:
       VMSLTOpcode =
           IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
       VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4_MASK
                                    : RISCV::PseudoVMSLT_VX_M4_MASK;
-      VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
-      VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4;
       break;
     case RISCVII::VLMUL::LMUL_8:
       VMSLTOpcode =
           IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
       VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8_MASK
                                    : RISCV::PseudoVMSLT_VX_M8_MASK;
+      break;
+    }
+    // Mask operations use the LMUL from the mask type.
+    switch (RISCVTargetLowering::getLMUL(VT)) {
+    default:
+      llvm_unreachable("Unexpected LMUL!");
+    case RISCVII::VLMUL::LMUL_F8:
+      VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
+      VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8;
+      break;
+    case RISCVII::VLMUL::LMUL_F4:
+      VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
+      VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4;
+      break;
+    case RISCVII::VLMUL::LMUL_F2:
+      VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
+      VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2;
+      break;
+    case RISCVII::VLMUL::LMUL_1:
+      VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
+      VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1;
+      break;
+    case RISCVII::VLMUL::LMUL_2:
+      VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
+      VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2;
+      break;
+    case RISCVII::VLMUL::LMUL_4:
+      VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
+      VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4;
+      break;
+    case RISCVII::VLMUL::LMUL_8:
       VMXOROpcode = RISCV::PseudoVMXOR_MM_M8;
       VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M8;
       break;
     }
     SDValue SEW = CurDAG->getTargetConstant(
         Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
+    SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
     SDValue VL;
     selectVLOp(Node->getOperand(5), VL);
     SDValue MaskedOff = Node->getOperand(1);
@@ -662,7 +682,7 @@
           CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
           0);
       ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOTOpcode, DL, VT,
-                                               {Mask, Cmp, VL, SEW}));
+                                               {Mask, Cmp, VL, MaskSEW}));
       return;
     }
@@ -673,7 +693,7 @@
                                    {MaskedOff, Src1, Src2, Mask, VL, SEW}),
           0);
       ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
-                                               {Cmp, Mask, VL, SEW}));
+                                               {Cmp, Mask, VL, MaskSEW}));
       return;
     }
   }
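The hunks above split opcode selection in two: the compare pseudos (vmslt/vmsltu) keep the LMUL and SEW derived from the source vector type, while the vmxor/vmandnot mask operations are now picked from the LMUL of the mask type and receive a Log2SEW operand of 0 (MaskSEW) instead of the source SEW. The split is safe because the source vtype and the mask vtype always agree on VLMAX. A compile-time sketch of that equality, using an illustrative eighths-of-a-register-group encoding for LMUL rather than the backend's own types:

// Sketch, not backend code: LMUL written in eighths of a register group
// (mf8=1, mf4=2, mf2=4, m1=8, m2=16, m4=32, m8=64). Since
// VLMAX = VLEN * LMUL / SEW, comparing LMulEighths/SEW compares VLMAX.
constexpr unsigned vlmaxScaled(unsigned LMulEighths, unsigned SEW) {
  return LMulEighths * 64 / SEW; // VLMAX scaled by 512/VLEN to stay integral
}
// A compare on nxv8i16 runs at SEW=16, LMUL=2 (16 eighths); its nxv8i1 mask
// result is modeled at SEW=8, LMUL=1 (8 eighths). Same VLMAX either way.
static_assert(vlmaxScaled(16, 16) == vlmaxScaled(8, 8),
              "source vtype and mask vtype agree on VLMAX");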
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -55,11 +55,13 @@
   // Fields from VTYPE.
   RISCVII::VLMUL VLMul = RISCVII::LMUL_1;
   uint8_t SEW = 0;
-  bool TailAgnostic = false;
-  bool MaskAgnostic = false;
+  uint8_t TailAgnostic : 1;
+  uint8_t MaskAgnostic : 1;
+  uint8_t MaskRegOp : 1;

 public:
-  VSETVLIInfo() : AVLImm(0) {}
+  VSETVLIInfo()
+      : AVLImm(0), TailAgnostic(false), MaskAgnostic(false), MaskRegOp(false) {}

   static VSETVLIInfo getUnknown() {
     VSETVLIInfo Info;
@@ -114,13 +116,14 @@
     TailAgnostic = RISCVVType::isTailAgnostic(VType);
     MaskAgnostic = RISCVVType::isMaskAgnostic(VType);
   }
-  void setVTYPE(RISCVII::VLMUL L, unsigned S, bool TA, bool MA) {
+  void setVTYPE(RISCVII::VLMUL L, unsigned S, bool TA, bool MA, bool MRO) {
     assert(isValid() && !isUnknown() &&
            "Can't set VTYPE for uninitialized or unknown");
     VLMul = L;
     SEW = S;
     TailAgnostic = TA;
     MaskAgnostic = MA;
+    MaskRegOp = MRO;
   }

   unsigned encodeVTYPE() const {
@@ -163,21 +166,34 @@
     return getSEWLMULRatio() == Other.getSEWLMULRatio();
   }

-  bool isCompatible(const VSETVLIInfo &Other) const {
-    assert(isValid() && Other.isValid() &&
+  // Determine whether the vector instruction's requirements represented by
+  // InstrInfo are compatible with the previous vsetvli instruction represented
+  // by this.
+  bool isCompatible(const VSETVLIInfo &InstrInfo) const {
+    assert(isValid() && InstrInfo.isValid() &&
            "Can't compare invalid VSETVLIInfos");
     // Nothing is compatible with Unknown.
-    if (isUnknown() || Other.isUnknown())
+    if (isUnknown() || InstrInfo.isUnknown())
       return false;

-    // If other doesn't need an AVLReg and the SEW matches, consider it
-    // compatible.
-    if (Other.hasAVLReg() && Other.AVLReg == RISCV::NoRegister) {
-      if (SEW == Other.SEW)
+    // If the instruction doesn't need an AVLReg and the SEW matches, consider
+    // it compatible.
+    if (InstrInfo.hasAVLReg() && InstrInfo.AVLReg == RISCV::NoRegister) {
+      if (SEW == InstrInfo.SEW)
         return true;
     }

-    return hasSameVTYPE(Other) && hasSameAVL(Other);
+    // VTypes must match unless the instruction is a mask reg operation, in
+    // which case it only cares about VLMAX.
+    // FIXME: Mask reg operations are probably ok if "this" VLMAX is larger
+    // than "InstrInfo".
+    if (!hasSameVTYPE(InstrInfo) &&
+        !(InstrInfo.MaskRegOp && hasSameVLMAX(InstrInfo) &&
+          TailAgnostic == InstrInfo.TailAgnostic &&
+          MaskAgnostic == InstrInfo.MaskAgnostic))
+      return false;
+
+    return hasSameAVL(InstrInfo);
   }

   bool operator==(const VSETVLIInfo &Other) const {
@@ -316,7 +332,9 @@
   RISCVII::VLMUL VLMul = RISCVII::getLMul(TSFlags);

   unsigned Log2SEW = MI.getOperand(NumOperands - 1).getImm();
-  unsigned SEW = 1 << Log2SEW;
+  // A Log2SEW of 0 is an operation on mask registers only.
+  bool MaskRegOp = Log2SEW == 0;
+  unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
   assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");

   // Default to tail agnostic unless the destination is tied to a source.
@@ -350,7 +368,7 @@
   } else
     InstrInfo.setAVLReg(RISCV::NoRegister);
   InstrInfo.setVTYPE(VLMul, SEW, /*TailAgnostic*/ TailAgnostic,
-                     /*MaskAgnostic*/ false);
+                     /*MaskAgnostic*/ false, MaskRegOp);

   return InstrInfo;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -267,9 +267,10 @@
 class MTypeInfo<ValueType Mas, LMULInfo M, string Bx> {
   ValueType Mask = Mas;
   // {SEW, VLMul} values set a valid VType to deal with this mask type.
-  // we assume SEW=8 and set corresponding LMUL.
-  int SEW = 8;
-  int Log2SEW = 3;
+  // We assume SEW=1 and set the corresponding LMUL. vsetvli insertion will
+  // look for SEW=1 to optimize based on surrounding instructions.
+  int SEW = 1;
+  int Log2SEW = 0;
   LMULInfo LMul = M;
   string BX = Bx; // Appendix of mask operations.
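Putting the pieces together: a mask-register instruction (one whose Log2SEW operand is 0) reads and writes a single bit per element, so it can execute under any prior vtype that yields the same VLMAX with the same tail/mask policy bits and the same AVL; it does not need the exact SEW/LMUL pair of the instruction that produced the mask. A standalone sketch of the relaxed rule, using a simplified state struct with illustrative names rather than the pass's own types:

struct State {
  unsigned SEW;         // 8, 16, 32 or 64; mask-only ops are modeled as SEW=8
  unsigned LMulEighths; // mf8=1, mf4=2, mf2=4, m1=8, m2=16, m4=32, m8=64
  bool TailAgnostic;
  bool MaskAgnostic;
  bool MaskRegOp;       // true when the instruction's Log2SEW operand is 0
};

// SEW:LMUL ratio; equal ratios imply equal VLMAX at any VLEN.
unsigned ratio(const State &S) { return S.SEW * 8 / S.LMulEighths; }

// Can the instruction described by Instr run under the vtype left behind by
// Prev without a new vsetvli? (AVL equality is checked separately, as
// hasSameAVL does in the pass.)
bool compatibleVTYPE(const State &Prev, const State &Instr) {
  bool SameVTYPE = Prev.SEW == Instr.SEW &&
                   Prev.LMulEighths == Instr.LMulEighths &&
                   Prev.TailAgnostic == Instr.TailAgnostic &&
                   Prev.MaskAgnostic == Instr.MaskAgnostic;
  // A mask-register op only needs the same VLMAX and the same policy bits;
  // the exact SEW/LMUL pair that produced that VLMAX is irrelevant to it.
  bool MaskOpOK = Instr.MaskRegOp && ratio(Prev) == ratio(Instr) &&
                  Prev.TailAgnostic == Instr.TailAgnostic &&
                  Prev.MaskAgnostic == Instr.MaskAgnostic;
  return SameVTYPE || MaskOpOK;
}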
// The pattern fragment which produces the AVL operand, representing the diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll @@ -9,7 +9,6 @@ ; CHECK-NEXT: vle16.v v25, (a0) ; CHECK-NEXT: vle16.v v26, (a1) ; CHECK-NEXT: vmfeq.vv v25, v25, v26 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vse1.v v25, (a2) ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x @@ -26,7 +25,6 @@ ; CHECK-NEXT: vle16.v v25, (a0) ; CHECK-NEXT: vle16.v v26, (a1) ; CHECK-NEXT: vmfeq.vv v25, v25, v26 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vse1.v v25, (a2) ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x @@ -143,7 +141,6 @@ ; CHECK-NEXT: vle16.v v26, (a0) ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vmflt.vv v25, v26, v28 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vse1.v v25, (a2) ; CHECK-NEXT: ret %a = load <16 x half>, <16 x half>* %x @@ -160,7 +157,6 @@ ; CHECK-NEXT: vle16.v v26, (a0) ; CHECK-NEXT: vle16.v v28, (a1) ; CHECK-NEXT: vmflt.vv v25, v26, v28 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vse1.v v25, (a2) ; CHECK-NEXT: ret %a = load <16 x half>, <16 x half>* %x @@ -177,7 +173,6 @@ ; CHECK-NEXT: vle32.v v26, (a0) ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vmfle.vv v25, v28, v26 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vse1.v v25, (a2) ; CHECK-NEXT: ret %a = load <8 x float>, <8 x float>* %x @@ -194,7 +189,6 @@ ; CHECK-NEXT: vle32.v v26, (a0) ; CHECK-NEXT: vle32.v v28, (a1) ; CHECK-NEXT: vmfle.vv v25, v28, v26 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vse1.v v25, (a2) ; CHECK-NEXT: ret %a = load <8 x float>, <8 x float>* %x @@ -262,7 +256,6 @@ ; CHECK-NEXT: vle16.v v28, (a0) ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vmflt.vv v25, v8, v28 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu ; CHECK-NEXT: vmnand.mm v25, v25, v25 ; CHECK-NEXT: vse1.v v25, (a2) ; CHECK-NEXT: ret @@ -281,7 +274,6 @@ ; CHECK-NEXT: vle16.v v28, (a0) ; CHECK-NEXT: vle16.v v8, (a1) ; CHECK-NEXT: vmfle.vv v25, v28, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu ; CHECK-NEXT: vse1.v v25, (a2) ; CHECK-NEXT: ret %a = load <32 x half>, <32 x half>* %x @@ -298,7 +290,6 @@ ; CHECK-NEXT: vle32.v v28, (a0) ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vmflt.vv v25, v28, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v25, v25, v25 ; CHECK-NEXT: vse1.v v25, (a2) ; CHECK-NEXT: ret @@ -316,7 +307,6 @@ ; CHECK-NEXT: vle32.v v28, (a0) ; CHECK-NEXT: vle32.v v8, (a1) ; CHECK-NEXT: vmfle.vv v25, v8, v28 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vse1.v v25, (a2) ; CHECK-NEXT: ret %a = load <16 x float>, <16 x float>* %x @@ -333,7 +323,6 @@ ; CHECK-NEXT: vle64.v v28, (a0) ; CHECK-NEXT: vle64.v v8, (a1) ; CHECK-NEXT: vmfle.vv v25, v8, v28 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vmnand.mm v25, v25, v25 ; CHECK-NEXT: vse1.v v25, (a2) ; CHECK-NEXT: ret @@ -351,7 +340,6 @@ ; CHECK-NEXT: vle64.v v28, (a0) ; CHECK-NEXT: vle64.v v8, (a1) ; CHECK-NEXT: vmflt.vv v25, v28, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vse1.v v25, (a2) ; CHECK-NEXT: ret %a = load <8 x double>, <8 x double>* %x @@ -369,7 +357,6 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v16, (a1) ; CHECK-NEXT: vmfle.vv v25, v8, v16 -; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu ; CHECK-NEXT: 
vmnand.mm v25, v25, v25 ; CHECK-NEXT: vse1.v v25, (a2) ; CHECK-NEXT: ret @@ -388,7 +375,6 @@ ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vle16.v v16, (a1) ; CHECK-NEXT: vmflt.vv v25, v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu ; CHECK-NEXT: vse1.v v25, (a2) ; CHECK-NEXT: ret %a = load <64 x half>, <64 x half>* %x @@ -407,7 +393,6 @@ ; CHECK-NEXT: vle32.v v16, (a1) ; CHECK-NEXT: vmflt.vv v25, v8, v16 ; CHECK-NEXT: vmflt.vv v26, v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu ; CHECK-NEXT: vmnor.mm v25, v26, v25 ; CHECK-NEXT: vse1.v v25, (a2) ; CHECK-NEXT: ret @@ -426,7 +411,6 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vle32.v v16, (a1) ; CHECK-NEXT: vmfeq.vv v25, v8, v16 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu ; CHECK-NEXT: vse1.v v25, (a2) ; CHECK-NEXT: ret %a = load <32 x float>, <32 x float>* %x @@ -444,7 +428,6 @@ ; CHECK-NEXT: vle64.v v16, (a1) ; CHECK-NEXT: vmflt.vv v25, v8, v16 ; CHECK-NEXT: vmflt.vv v26, v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v25, v26, v25 ; CHECK-NEXT: vse1.v v25, (a2) ; CHECK-NEXT: ret @@ -462,7 +445,6 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vle64.v v16, (a1) ; CHECK-NEXT: vmfne.vv v25, v8, v16 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vse1.v v25, (a2) ; CHECK-NEXT: ret %a = load <16 x double>, <16 x double>* %x @@ -480,8 +462,8 @@ ; CHECK-NEXT: vle16.v v26, (a0) ; CHECK-NEXT: vmfeq.vv v25, v25, v25 ; CHECK-NEXT: vmfeq.vv v26, v26, v26 -; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu ; CHECK-NEXT: vmand.mm v0, v26, v25 +; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu ; CHECK-NEXT: vmv.v.i v25, 0 ; CHECK-NEXT: vmerge.vim v25, v25, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8,mf2,ta,mu @@ -507,8 +489,8 @@ ; CHECK-NEXT: vle16.v v26, (a0) ; CHECK-NEXT: vmfne.vv v25, v25, v25 ; CHECK-NEXT: vmfne.vv v26, v26, v26 -; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 +; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu ; CHECK-NEXT: vmv.v.i v25, 0 ; CHECK-NEXT: vmerge.vim v25, v25, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8,mf2,ta,mu @@ -532,7 +514,6 @@ ; CHECK-NEXT: vsetivli zero, 8, e16,m1,ta,mu ; CHECK-NEXT: vle16.v v25, (a0) ; CHECK-NEXT: vmfeq.vf v25, v25, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x @@ -549,7 +530,6 @@ ; CHECK-NEXT: vsetivli zero, 8, e16,m1,ta,mu ; CHECK-NEXT: vle16.v v25, (a0) ; CHECK-NEXT: vmfeq.vf v25, v25, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x @@ -666,7 +646,6 @@ ; CHECK-NEXT: vsetivli zero, 16, e16,m2,ta,mu ; CHECK-NEXT: vle16.v v26, (a0) ; CHECK-NEXT: vmflt.vf v25, v26, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x half>, <16 x half>* %x @@ -683,7 +662,6 @@ ; CHECK-NEXT: vsetivli zero, 16, e16,m2,ta,mu ; CHECK-NEXT: vle16.v v26, (a0) ; CHECK-NEXT: vmflt.vf v25, v26, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x half>, <16 x half>* %x @@ -700,7 +678,6 @@ ; CHECK-NEXT: vsetivli zero, 8, e32,m2,ta,mu ; CHECK-NEXT: vle32.v v26, (a0) ; CHECK-NEXT: vmfge.vf v25, v26, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x float>, <8 x float>* %x @@ -717,7 +694,6 @@ ; CHECK-NEXT: vsetivli zero, 8, e32,m2,ta,mu ; CHECK-NEXT: vle32.v v26, (a0) ; 
CHECK-NEXT: vmfge.vf v25, v26, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x float>, <8 x float>* %x @@ -785,7 +761,6 @@ ; CHECK-NEXT: vsetvli zero, a2, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) ; CHECK-NEXT: vmfgt.vf v25, v28, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu ; CHECK-NEXT: vmnand.mm v25, v25, v25 ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret @@ -804,7 +779,6 @@ ; CHECK-NEXT: vsetvli zero, a2, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) ; CHECK-NEXT: vmfle.vf v25, v28, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <32 x half>, <32 x half>* %x @@ -821,7 +795,6 @@ ; CHECK-NEXT: vsetivli zero, 16, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) ; CHECK-NEXT: vmflt.vf v25, v28, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v25, v25, v25 ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret @@ -839,7 +812,6 @@ ; CHECK-NEXT: vsetivli zero, 16, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) ; CHECK-NEXT: vmfge.vf v25, v28, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x float>, <16 x float>* %x @@ -856,7 +828,6 @@ ; CHECK-NEXT: vsetivli zero, 8, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) ; CHECK-NEXT: vmfge.vf v25, v28, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vmnand.mm v25, v25, v25 ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret @@ -874,7 +845,6 @@ ; CHECK-NEXT: vsetivli zero, 8, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) ; CHECK-NEXT: vmflt.vf v25, v28, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x double>, <8 x double>* %x @@ -892,7 +862,6 @@ ; CHECK-NEXT: vsetvli zero, a2, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfle.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu ; CHECK-NEXT: vmnand.mm v25, v25, v25 ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret @@ -911,7 +880,6 @@ ; CHECK-NEXT: vsetvli zero, a2, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <64 x half>, <64 x half>* %x @@ -930,7 +898,6 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmflt.vf v25, v8, fa0 ; CHECK-NEXT: vmfgt.vf v26, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu ; CHECK-NEXT: vmnor.mm v25, v26, v25 ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret @@ -949,7 +916,6 @@ ; CHECK-NEXT: vsetvli zero, a2, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfeq.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <32 x float>, <32 x float>* %x @@ -967,7 +933,6 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmflt.vf v25, v8, fa0 ; CHECK-NEXT: vmfgt.vf v26, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v25, v26, v25 ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret @@ -985,7 +950,6 @@ ; CHECK-NEXT: vsetivli zero, 16, e64,m8,ta,mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfne.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x double>, <16 x double>* %x @@ -1004,8 +968,8 @@ ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfeq.vf v26, v26, fa0 ; CHECK-NEXT: vmfeq.vv v25, v25, v25 -; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu ; 
CHECK-NEXT: vmand.mm v0, v25, v26 +; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu ; CHECK-NEXT: vmv.v.i v25, 0 ; CHECK-NEXT: vmerge.vim v25, v25, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8,mf2,ta,mu @@ -1032,8 +996,8 @@ ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfne.vf v26, v26, fa0 ; CHECK-NEXT: vmfne.vv v25, v25, v25 -; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu ; CHECK-NEXT: vmor.mm v0, v25, v26 +; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu ; CHECK-NEXT: vmv.v.i v25, 0 ; CHECK-NEXT: vmerge.vim v25, v25, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8,mf2,ta,mu @@ -1058,7 +1022,6 @@ ; CHECK-NEXT: vsetivli zero, 8, e16,m1,ta,mu ; CHECK-NEXT: vle16.v v25, (a0) ; CHECK-NEXT: vmfeq.vf v25, v25, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x @@ -1075,7 +1038,6 @@ ; CHECK-NEXT: vsetivli zero, 8, e16,m1,ta,mu ; CHECK-NEXT: vle16.v v25, (a0) ; CHECK-NEXT: vmfeq.vf v25, v25, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x half>, <8 x half>* %x @@ -1192,7 +1154,6 @@ ; CHECK-NEXT: vsetivli zero, 16, e16,m2,ta,mu ; CHECK-NEXT: vle16.v v26, (a0) ; CHECK-NEXT: vmfgt.vf v25, v26, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x half>, <16 x half>* %x @@ -1209,7 +1170,6 @@ ; CHECK-NEXT: vsetivli zero, 16, e16,m2,ta,mu ; CHECK-NEXT: vle16.v v26, (a0) ; CHECK-NEXT: vmfgt.vf v25, v26, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x half>, <16 x half>* %x @@ -1226,7 +1186,6 @@ ; CHECK-NEXT: vsetivli zero, 8, e32,m2,ta,mu ; CHECK-NEXT: vle32.v v26, (a0) ; CHECK-NEXT: vmfle.vf v25, v26, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x float>, <8 x float>* %x @@ -1243,7 +1202,6 @@ ; CHECK-NEXT: vsetivli zero, 8, e32,m2,ta,mu ; CHECK-NEXT: vle32.v v26, (a0) ; CHECK-NEXT: vmfle.vf v25, v26, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x float>, <8 x float>* %x @@ -1311,7 +1269,6 @@ ; CHECK-NEXT: vsetvli zero, a2, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) ; CHECK-NEXT: vmflt.vf v25, v28, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu ; CHECK-NEXT: vmnand.mm v25, v25, v25 ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret @@ -1330,7 +1287,6 @@ ; CHECK-NEXT: vsetvli zero, a2, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) ; CHECK-NEXT: vmfge.vf v25, v28, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <32 x half>, <32 x half>* %x @@ -1347,7 +1303,6 @@ ; CHECK-NEXT: vsetivli zero, 16, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) ; CHECK-NEXT: vmfgt.vf v25, v28, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v25, v25, v25 ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret @@ -1365,7 +1320,6 @@ ; CHECK-NEXT: vsetivli zero, 16, e32,m4,ta,mu ; CHECK-NEXT: vle32.v v28, (a0) ; CHECK-NEXT: vmfle.vf v25, v28, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x float>, <16 x float>* %x @@ -1382,7 +1336,6 @@ ; CHECK-NEXT: vsetivli zero, 8, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) ; CHECK-NEXT: vmfle.vf v25, v28, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vmnand.mm v25, v25, v25 ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret 
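The test updates in this and the following files all show the same effect: the explicit vtype toggle that used to precede a mask-only consumer (vse1.v, vmnand.mm, vmnor.mm, vmor.mm, vmand.mm, vmxor.mm, vle1.v) disappears, because the compare's vtype already gives the mask operation the VLMAX it needs. A worked compile-time check for two of the pairs deleted above, with an arbitrary legal VLEN (the equalities hold for any VLEN):

constexpr unsigned VLEN = 128; // any legal VLEN gives the same equalities

// VLMAX = VLEN * LMUL / SEW, with LMUL written as LMulNum/LMulDen.
constexpr unsigned vlmax(unsigned LMulNum, unsigned LMulDen, unsigned SEW) {
  return VLEN * LMulNum / (LMulDen * SEW);
}

// vmfle.vf at e64,m4 vs. the removed "vsetvli zero, zero, e8,mf2,ta,mu":
static_assert(vlmax(4, 1, 64) == vlmax(1, 2, 8), "same VLMAX");
// vmfgt.vf at e16,m2 vs. the removed "vsetvli zero, zero, e8,m1,ta,mu":
static_assert(vlmax(2, 1, 16) == vlmax(1, 1, 8), "same VLMAX");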
@@ -1400,7 +1353,6 @@ ; CHECK-NEXT: vsetivli zero, 8, e64,m4,ta,mu ; CHECK-NEXT: vle64.v v28, (a0) ; CHECK-NEXT: vmfgt.vf v25, v28, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <8 x double>, <8 x double>* %x @@ -1418,7 +1370,6 @@ ; CHECK-NEXT: vsetvli zero, a2, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmfge.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu ; CHECK-NEXT: vmnand.mm v25, v25, v25 ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret @@ -1437,7 +1388,6 @@ ; CHECK-NEXT: vsetvli zero, a2, e16,m8,ta,mu ; CHECK-NEXT: vle16.v v8, (a0) ; CHECK-NEXT: vmflt.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <64 x half>, <64 x half>* %x @@ -1456,7 +1406,6 @@ ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 ; CHECK-NEXT: vmflt.vf v26, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu ; CHECK-NEXT: vmnor.mm v25, v26, v25 ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret @@ -1475,7 +1424,6 @@ ; CHECK-NEXT: vsetvli zero, a2, e32,m8,ta,mu ; CHECK-NEXT: vle32.v v8, (a0) ; CHECK-NEXT: vmfeq.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <32 x float>, <32 x float>* %x @@ -1493,7 +1441,6 @@ ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 ; CHECK-NEXT: vmflt.vf v26, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v25, v26, v25 ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret @@ -1511,7 +1458,6 @@ ; CHECK-NEXT: vsetivli zero, 16, e64,m8,ta,mu ; CHECK-NEXT: vle64.v v8, (a0) ; CHECK-NEXT: vmfne.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vse1.v v25, (a1) ; CHECK-NEXT: ret %a = load <16 x double>, <16 x double>* %x @@ -1530,8 +1476,8 @@ ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfeq.vf v26, v26, fa0 ; CHECK-NEXT: vmfeq.vv v25, v25, v25 -; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu ; CHECK-NEXT: vmand.mm v0, v26, v25 +; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu ; CHECK-NEXT: vmv.v.i v25, 0 ; CHECK-NEXT: vmerge.vim v25, v25, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8,mf2,ta,mu @@ -1558,8 +1504,8 @@ ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfne.vf v26, v26, fa0 ; CHECK-NEXT: vmfne.vv v25, v25, v25 -; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 +; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu ; CHECK-NEXT: vmv.v.i v25, 0 ; CHECK-NEXT: vmerge.vim v25, v25, 1, v0 ; CHECK-NEXT: vsetivli zero, 8, e8,mf2,ta,mu diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect.ll @@ -8,9 +8,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32,m2,ta,mu ; CHECK-NEXT: vle32.v v26, (a0) ; CHECK-NEXT: vle32.v v28, (a1) -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vle1.v v0, (a2) -; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu ; CHECK-NEXT: vmerge.vvm v26, v28, v26, v0 ; CHECK-NEXT: vse32.v v26, (a3) ; CHECK-NEXT: ret @@ -27,9 +25,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32,m2,ta,mu ; CHECK-NEXT: vle32.v v26, (a1) -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vle1.v v0, (a2) -; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu ; CHECK-NEXT: vmerge.vxm v26, v26, a0, v0 ; CHECK-NEXT: vse32.v v26, (a3) ; CHECK-NEXT: ret @@ -47,9 +43,7 @@ ; 
CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32,m2,ta,mu ; CHECK-NEXT: vle32.v v26, (a0) -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vle1.v v0, (a1) -; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu ; CHECK-NEXT: vmerge.vim v26, v26, -1, v0 ; CHECK-NEXT: vse32.v v26, (a2) ; CHECK-NEXT: ret @@ -68,9 +62,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e32,m2,ta,mu ; CHECK-NEXT: vle32.v v26, (a0) ; CHECK-NEXT: vle32.v v28, (a1) -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vle1.v v0, (a2) -; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu ; CHECK-NEXT: vmerge.vvm v26, v28, v26, v0 ; CHECK-NEXT: vse32.v v26, (a3) ; CHECK-NEXT: ret @@ -87,9 +79,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32,m2,ta,mu ; CHECK-NEXT: vle32.v v26, (a0) -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vle1.v v0, (a1) -; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu ; CHECK-NEXT: vfmerge.vfm v26, v26, fa0, v0 ; CHECK-NEXT: vse32.v v26, (a2) ; CHECK-NEXT: ret @@ -107,9 +97,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 8, e32,m2,ta,mu ; CHECK-NEXT: vle32.v v26, (a0) -; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu ; CHECK-NEXT: vle1.v v0, (a1) -; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu ; CHECK-NEXT: vmerge.vim v26, v26, 0, v0 ; CHECK-NEXT: vse32.v v26, (a2) ; CHECK-NEXT: ret @@ -128,9 +116,7 @@ ; CHECK-NEXT: vsetivli zero, 16, e16,m2,ta,mu ; CHECK-NEXT: vle16.v v26, (a0) ; CHECK-NEXT: vle16.v v28, (a1) -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vle1.v v0, (a2) -; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu ; CHECK-NEXT: vmerge.vvm v26, v28, v26, v0 ; CHECK-NEXT: vse16.v v26, (a3) ; CHECK-NEXT: ret @@ -147,9 +133,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16,m2,ta,mu ; CHECK-NEXT: vle16.v v26, (a1) -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vle1.v v0, (a2) -; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu ; CHECK-NEXT: vmerge.vxm v26, v26, a0, v0 ; CHECK-NEXT: vse16.v v26, (a3) ; CHECK-NEXT: ret @@ -167,9 +151,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetivli zero, 16, e16,m2,ta,mu ; CHECK-NEXT: vle16.v v26, (a0) -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vle1.v v0, (a1) -; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu ; CHECK-NEXT: vmerge.vim v26, v26, 4, v0 ; CHECK-NEXT: vse16.v v26, (a2) ; CHECK-NEXT: ret @@ -189,9 +171,7 @@ ; CHECK-NEXT: vsetvli zero, a4, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) ; CHECK-NEXT: vle16.v v8, (a1) -; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu ; CHECK-NEXT: vle1.v v0, (a2) -; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu ; CHECK-NEXT: vmerge.vvm v28, v8, v28, v0 ; CHECK-NEXT: vse16.v v28, (a3) ; CHECK-NEXT: ret @@ -209,9 +189,7 @@ ; CHECK-NEXT: addi a3, zero, 32 ; CHECK-NEXT: vsetvli zero, a3, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu ; CHECK-NEXT: vle1.v v0, (a1) -; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu ; CHECK-NEXT: vfmerge.vfm v28, v28, fa0, v0 ; CHECK-NEXT: vse16.v v28, (a2) ; CHECK-NEXT: ret @@ -230,9 +208,7 @@ ; CHECK-NEXT: addi a3, zero, 32 ; CHECK-NEXT: vsetvli zero, a3, e16,m4,ta,mu ; CHECK-NEXT: vle16.v v28, (a0) -; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu ; CHECK-NEXT: vle1.v v0, (a1) -; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu ; CHECK-NEXT: vmerge.vim v28, v28, 0, v0 ; CHECK-NEXT: vse16.v v28, (a2) ; CHECK-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/saddo-sdnode.ll @@ -10,9 +10,7 @@ ; CHECK-NEXT: vadd.vv v25, v8, v9 ; CHECK-NEXT: vmslt.vv v26, v25, v8 ; CHECK-NEXT: vmsle.vi v27, v9, -1 -; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu ; CHECK-NEXT: vmxor.mm v0, v27, v26 -; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu ; CHECK-NEXT: vmerge.vim v8, v25, 0, v0 ; CHECK-NEXT: ret %a = call { , } @llvm.sadd.with.overflow.nxv2i32( %x, %y) diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv32.ll @@ -291,7 +291,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vv v25, v8, v10 ; CHECK-NEXT: vmflt.vv v26, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp one %va, %vb @@ -304,7 +303,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 ; CHECK-NEXT: vmfgt.vf v26, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -319,7 +317,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 ; CHECK-NEXT: vmflt.vf v26, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -356,7 +353,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfeq.vv v25, v10, v10 ; CHECK-NEXT: vmfeq.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmand.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp ord %va, %vb @@ -370,7 +366,6 @@ ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfeq.vf v25, v26, fa0 ; CHECK-NEXT: vmfeq.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmand.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -386,7 +381,6 @@ ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfeq.vf v25, v26, fa0 ; CHECK-NEXT: vmfeq.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmand.mm v0, v25, v26 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -401,7 +395,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfeq.vv v25, v10, v10 ; CHECK-NEXT: vmfeq.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmand.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp ord %va, %vb @@ -415,7 +408,6 @@ ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfeq.vf v25, v26, fa0 ; CHECK-NEXT: vmfeq.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmand.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -430,7 +422,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vv v25, v8, v10 ; CHECK-NEXT: vmflt.vv v26, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnor.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp ueq %va, %vb @@ -443,7 +434,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 ; CHECK-NEXT: vmfgt.vf v26, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnor.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -458,7 +448,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 ; CHECK-NEXT: vmflt.vf v26, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnor.mm v0, v26, v25 ; 
CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -494,7 +483,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfle.vv v25, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %vc = fcmp ugt %va, %vb @@ -506,7 +494,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfle.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -520,7 +507,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfge.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -556,7 +542,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vv v25, v8, v10 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %vc = fcmp uge %va, %vb @@ -568,7 +553,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -582,7 +566,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -618,7 +601,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfle.vv v25, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %vc = fcmp ult %va, %vb @@ -630,7 +612,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfge.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -644,7 +625,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfle.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -680,7 +660,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vv v25, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %vc = fcmp ule %va, %vb @@ -692,7 +671,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -706,7 +684,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -799,7 +776,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfne.vv v25, v10, v10 ; CHECK-NEXT: vmfne.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp uno %va, %vb @@ -813,7 +789,6 @@ ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfne.vf v25, v26, fa0 ; CHECK-NEXT: vmfne.vv v26, v8, v8 -; 
CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -829,7 +804,6 @@ ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfne.vf v25, v26, fa0 ; CHECK-NEXT: vmfne.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v25, v26 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -844,7 +818,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfne.vv v25, v10, v10 ; CHECK-NEXT: vmfne.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp uno %va, %vb @@ -858,7 +831,6 @@ ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfne.vf v25, v26, fa0 ; CHECK-NEXT: vmfne.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -1153,7 +1125,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmflt.vv v25, v8, v12 ; CHECK-NEXT: vmflt.vv v26, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp one %va, %vb @@ -1166,7 +1137,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 ; CHECK-NEXT: vmfgt.vf v26, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 @@ -1181,7 +1151,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 ; CHECK-NEXT: vmflt.vf v26, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 @@ -1218,7 +1187,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmfeq.vv v25, v12, v12 ; CHECK-NEXT: vmfeq.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmand.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp ord %va, %vb @@ -1232,7 +1200,6 @@ ; CHECK-NEXT: vfmv.v.f v28, fa0 ; CHECK-NEXT: vmfeq.vf v25, v28, fa0 ; CHECK-NEXT: vmfeq.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmand.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 @@ -1248,7 +1215,6 @@ ; CHECK-NEXT: vfmv.v.f v28, fa0 ; CHECK-NEXT: vmfeq.vf v25, v28, fa0 ; CHECK-NEXT: vmfeq.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmand.mm v0, v25, v26 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 @@ -1263,7 +1229,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmfeq.vv v25, v12, v12 ; CHECK-NEXT: vmfeq.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmand.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp ord %va, %vb @@ -1277,7 +1242,6 @@ ; CHECK-NEXT: vfmv.v.f v28, fa0 ; CHECK-NEXT: vmfeq.vf v25, v28, fa0 ; CHECK-NEXT: vmfeq.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmand.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 @@ -1292,7 +1256,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmflt.vv v25, v8, v12 ; CHECK-NEXT: vmflt.vv v26, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnor.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp ueq %va, %vb @@ -1305,7 +1268,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 ; CHECK-NEXT: vmfgt.vf v26, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, 
e8,m1,ta,mu ; CHECK-NEXT: vmnor.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 @@ -1320,7 +1282,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 ; CHECK-NEXT: vmflt.vf v26, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnor.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 @@ -1356,7 +1317,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmfle.vv v25, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %vc = fcmp ugt %va, %vb @@ -1368,7 +1328,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmfle.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 @@ -1382,7 +1341,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmfge.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 @@ -1418,7 +1376,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmflt.vv v25, v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %vc = fcmp uge %va, %vb @@ -1430,7 +1387,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 @@ -1444,7 +1400,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 @@ -1480,7 +1435,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmfle.vv v25, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %vc = fcmp ult %va, %vb @@ -1492,7 +1446,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmfge.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 @@ -1506,7 +1459,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmfle.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 @@ -1542,7 +1494,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmflt.vv v25, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %vc = fcmp ule %va, %vb @@ -1554,7 +1505,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 @@ -1568,7 +1518,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 @@ -1661,7 +1610,6 @@ ; 
CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmfne.vv v25, v12, v12 ; CHECK-NEXT: vmfne.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp uno %va, %vb @@ -1675,7 +1623,6 @@ ; CHECK-NEXT: vfmv.v.f v28, fa0 ; CHECK-NEXT: vmfne.vf v25, v28, fa0 ; CHECK-NEXT: vmfne.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 @@ -1691,7 +1638,6 @@ ; CHECK-NEXT: vfmv.v.f v28, fa0 ; CHECK-NEXT: vmfne.vf v25, v28, fa0 ; CHECK-NEXT: vmfne.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v25, v26 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 @@ -1706,7 +1652,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vmfne.vv v25, v12, v12 ; CHECK-NEXT: vmfne.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp uno %va, %vb @@ -1720,7 +1665,6 @@ ; CHECK-NEXT: vfmv.v.f v28, fa0 ; CHECK-NEXT: vmfne.vf v25, v28, fa0 ; CHECK-NEXT: vmfne.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, float %b, i32 0 @@ -2015,7 +1959,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmflt.vv v25, v8, v16 ; CHECK-NEXT: vmflt.vv v26, v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp one %va, %vb @@ -2028,7 +1971,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 ; CHECK-NEXT: vmfgt.vf v26, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 @@ -2043,7 +1985,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 ; CHECK-NEXT: vmflt.vf v26, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 @@ -2080,7 +2021,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmfeq.vv v25, v16, v16 ; CHECK-NEXT: vmfeq.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmand.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp ord %va, %vb @@ -2094,7 +2034,6 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vmfeq.vf v25, v16, fa0 ; CHECK-NEXT: vmfeq.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmand.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 @@ -2110,7 +2049,6 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vmfeq.vf v25, v16, fa0 ; CHECK-NEXT: vmfeq.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmand.mm v0, v25, v26 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 @@ -2125,7 +2063,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmfeq.vv v25, v16, v16 ; CHECK-NEXT: vmfeq.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmand.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp ord %va, %vb @@ -2139,7 +2076,6 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vmfeq.vf v25, v16, fa0 ; CHECK-NEXT: vmfeq.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmand.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 @@ -2154,7 +2090,6 @@ ; CHECK-NEXT: vsetvli a0, zero, 
e64,m8,ta,mu ; CHECK-NEXT: vmflt.vv v25, v8, v16 ; CHECK-NEXT: vmflt.vv v26, v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnor.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp ueq %va, %vb @@ -2167,7 +2102,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 ; CHECK-NEXT: vmfgt.vf v26, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnor.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 @@ -2182,7 +2116,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 ; CHECK-NEXT: vmflt.vf v26, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnor.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 @@ -2218,7 +2151,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmfle.vv v25, v8, v16 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %vc = fcmp ugt %va, %vb @@ -2230,7 +2162,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmfle.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 @@ -2244,7 +2175,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmfge.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 @@ -2280,7 +2210,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmflt.vv v25, v8, v16 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %vc = fcmp uge %va, %vb @@ -2292,7 +2221,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 @@ -2306,7 +2234,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 @@ -2342,7 +2269,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmfle.vv v25, v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %vc = fcmp ult %va, %vb @@ -2354,7 +2280,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmfge.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 @@ -2368,7 +2293,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmfle.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 @@ -2404,7 +2328,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmflt.vv v25, v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %vc = fcmp ule %va, %vb @@ -2416,7 +2339,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; 
CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 @@ -2430,7 +2352,6 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmnand.mm v0, v25, v25 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 @@ -2523,7 +2444,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmfne.vv v25, v16, v16 ; CHECK-NEXT: vmfne.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp uno %va, %vb @@ -2537,7 +2457,6 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vmfne.vf v25, v16, fa0 ; CHECK-NEXT: vmfne.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 @@ -2553,7 +2472,6 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vmfne.vf v25, v16, fa0 ; CHECK-NEXT: vmfne.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v25, v26 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 @@ -2568,7 +2486,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vmfne.vv v25, v16, v16 ; CHECK-NEXT: vmfne.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp uno %va, %vb @@ -2582,7 +2499,6 @@ ; CHECK-NEXT: vfmv.v.f v16, fa0 ; CHECK-NEXT: vmfne.vf v25, v16, fa0 ; CHECK-NEXT: vmfne.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, double %b, i32 0 diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-rv64.ll @@ -291,7 +291,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vv v25, v8, v10 ; CHECK-NEXT: vmflt.vv v26, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp one %va, %vb @@ -304,7 +303,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmflt.vf v25, v8, fa0 ; CHECK-NEXT: vmfgt.vf v26, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -319,7 +317,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfgt.vf v25, v8, fa0 ; CHECK-NEXT: vmflt.vf v26, v8, fa0 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmor.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -356,7 +353,6 @@ ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vmfeq.vv v25, v10, v10 ; CHECK-NEXT: vmfeq.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmand.mm v0, v26, v25 ; CHECK-NEXT: ret %vc = fcmp ord %va, %vb @@ -370,7 +366,6 @@ ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfeq.vf v25, v26, fa0 ; CHECK-NEXT: vmfeq.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmand.mm v0, v26, v25 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -386,7 +381,6 @@ ; CHECK-NEXT: vfmv.v.f v26, fa0 ; CHECK-NEXT: vmfeq.vf v25, v26, fa0 ; CHECK-NEXT: vmfeq.vv v26, v8, v8 -; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; CHECK-NEXT: vmand.mm v0, v25, v26 ; CHECK-NEXT: ret %head = insertelement undef, half %b, i32 0 @@ -401,7 
+395,6 @@
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vmfeq.vv v25, v10, v10
; CHECK-NEXT: vmfeq.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmand.mm v0, v26, v25
; CHECK-NEXT: ret
%vc = fcmp ord %va, %vb
@@ -415,7 +408,6 @@
; CHECK-NEXT: vfmv.v.f v26, fa0
; CHECK-NEXT: vmfeq.vf v25, v26, fa0
; CHECK-NEXT: vmfeq.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmand.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, half %b, i32 0
@@ -430,7 +422,6 @@
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vmflt.vv v25, v8, v10
; CHECK-NEXT: vmflt.vv v26, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnor.mm v0, v26, v25
; CHECK-NEXT: ret
%vc = fcmp ueq %va, %vb
@@ -443,7 +434,6 @@
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vmflt.vf v25, v8, fa0
; CHECK-NEXT: vmfgt.vf v26, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnor.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, half %b, i32 0
@@ -458,7 +448,6 @@
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vmfgt.vf v25, v8, fa0
; CHECK-NEXT: vmflt.vf v26, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnor.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, half %b, i32 0
@@ -494,7 +483,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vmfle.vv v25, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%vc = fcmp ugt %va, %vb
@@ -506,7 +494,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vmfle.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, half %b, i32 0
@@ -520,7 +507,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vmfge.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, half %b, i32 0
@@ -556,7 +542,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vmflt.vv v25, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%vc = fcmp uge %va, %vb
@@ -568,7 +553,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vmflt.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, half %b, i32 0
@@ -582,7 +566,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vmfgt.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, half %b, i32 0
@@ -618,7 +601,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vmfle.vv v25, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%vc = fcmp ult %va, %vb
@@ -630,7 +612,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vmfge.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, half %b, i32 0
@@ -644,7 +625,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vmfle.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, half %b, i32 0
@@ -680,7 +660,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vmflt.vv v25, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%vc = fcmp ule %va, %vb
@@ -692,7 +671,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vmfgt.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, half %b, i32 0
@@ -706,7 +684,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vmflt.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, half %b, i32 0
@@ -799,7 +776,6 @@
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vmfne.vv v25, v10, v10
; CHECK-NEXT: vmfne.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
%vc = fcmp uno %va, %vb
@@ -813,7 +789,6 @@
; CHECK-NEXT: vfmv.v.f v26, fa0
; CHECK-NEXT: vmfne.vf v25, v26, fa0
; CHECK-NEXT: vmfne.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, half %b, i32 0
@@ -829,7 +804,6 @@
; CHECK-NEXT: vfmv.v.f v26, fa0
; CHECK-NEXT: vmfne.vf v25, v26, fa0
; CHECK-NEXT: vmfne.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v25, v26
; CHECK-NEXT: ret
%head = insertelement undef, half %b, i32 0
@@ -844,7 +818,6 @@
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT: vmfne.vv v25, v10, v10
; CHECK-NEXT: vmfne.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
%vc = fcmp uno %va, %vb
@@ -858,7 +831,6 @@
; CHECK-NEXT: vfmv.v.f v26, fa0
; CHECK-NEXT: vmfne.vf v25, v26, fa0
; CHECK-NEXT: vmfne.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, half %b, i32 0
@@ -1153,7 +1125,6 @@
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmflt.vv v25, v8, v12
; CHECK-NEXT: vmflt.vv v26, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
%vc = fcmp one %va, %vb
@@ -1166,7 +1137,6 @@
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmflt.vf v25, v8, fa0
; CHECK-NEXT: vmfgt.vf v26, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, float %b, i32 0
@@ -1181,7 +1151,6 @@
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmfgt.vf v25, v8, fa0
; CHECK-NEXT: vmflt.vf v26, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, float %b, i32 0
@@ -1218,7 +1187,6 @@
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmfeq.vv v25, v12, v12
; CHECK-NEXT: vmfeq.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmand.mm v0, v26, v25
; CHECK-NEXT: ret
%vc = fcmp ord %va, %vb
@@ -1232,7 +1200,6 @@
; CHECK-NEXT: vfmv.v.f v28, fa0
; CHECK-NEXT: vmfeq.vf v25, v28, fa0
; CHECK-NEXT: vmfeq.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmand.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, float %b, i32 0
@@ -1248,7 +1215,6 @@
; CHECK-NEXT: vfmv.v.f v28, fa0
; CHECK-NEXT: vmfeq.vf v25, v28, fa0
; CHECK-NEXT: vmfeq.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmand.mm v0, v25, v26
; CHECK-NEXT: ret
%head = insertelement undef, float %b, i32 0
@@ -1263,7 +1229,6 @@
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmfeq.vv v25, v12, v12
; CHECK-NEXT: vmfeq.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmand.mm v0, v26, v25
; CHECK-NEXT: ret
%vc = fcmp ord %va, %vb
@@ -1277,7 +1242,6 @@
; CHECK-NEXT: vfmv.v.f v28, fa0
; CHECK-NEXT: vmfeq.vf v25, v28, fa0
; CHECK-NEXT: vmfeq.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmand.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, float %b, i32 0
@@ -1292,7 +1256,6 @@
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmflt.vv v25, v8, v12
; CHECK-NEXT: vmflt.vv v26, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnor.mm v0, v26, v25
; CHECK-NEXT: ret
%vc = fcmp ueq %va, %vb
@@ -1305,7 +1268,6 @@
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmflt.vf v25, v8, fa0
; CHECK-NEXT: vmfgt.vf v26, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnor.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, float %b, i32 0
@@ -1320,7 +1282,6 @@
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmfgt.vf v25, v8, fa0
; CHECK-NEXT: vmflt.vf v26, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnor.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, float %b, i32 0
@@ -1356,7 +1317,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmfle.vv v25, v8, v12
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%vc = fcmp ugt %va, %vb
@@ -1368,7 +1328,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmfle.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, float %b, i32 0
@@ -1382,7 +1341,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmfge.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, float %b, i32 0
@@ -1418,7 +1376,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmflt.vv v25, v8, v12
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%vc = fcmp uge %va, %vb
@@ -1430,7 +1387,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmflt.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, float %b, i32 0
@@ -1444,7 +1400,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmfgt.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, float %b, i32 0
@@ -1480,7 +1435,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmfle.vv v25, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%vc = fcmp ult %va, %vb
@@ -1492,7 +1446,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmfge.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, float %b, i32 0
@@ -1506,7 +1459,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmfle.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, float %b, i32 0
@@ -1542,7 +1494,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmflt.vv v25, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%vc = fcmp ule %va, %vb
@@ -1554,7 +1505,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmfgt.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, float %b, i32 0
@@ -1568,7 +1518,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmflt.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, float %b, i32 0
@@ -1661,7 +1610,6 @@
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmfne.vv v25, v12, v12
; CHECK-NEXT: vmfne.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
%vc = fcmp uno %va, %vb
@@ -1675,7 +1623,6 @@
; CHECK-NEXT: vfmv.v.f v28, fa0
; CHECK-NEXT: vmfne.vf v25, v28, fa0
; CHECK-NEXT: vmfne.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, float %b, i32 0
@@ -1691,7 +1638,6 @@
; CHECK-NEXT: vfmv.v.f v28, fa0
; CHECK-NEXT: vmfne.vf v25, v28, fa0
; CHECK-NEXT: vmfne.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v25, v26
; CHECK-NEXT: ret
%head = insertelement undef, float %b, i32 0
@@ -1706,7 +1652,6 @@
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
; CHECK-NEXT: vmfne.vv v25, v12, v12
; CHECK-NEXT: vmfne.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
%vc = fcmp uno %va, %vb
@@ -1720,7 +1665,6 @@
; CHECK-NEXT: vfmv.v.f v28, fa0
; CHECK-NEXT: vmfne.vf v25, v28, fa0
; CHECK-NEXT: vmfne.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, float %b, i32 0
@@ -2015,7 +1959,6 @@
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmflt.vv v25, v8, v16
; CHECK-NEXT: vmflt.vv v26, v16, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
%vc = fcmp one %va, %vb
@@ -2028,7 +1971,6 @@
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmflt.vf v25, v8, fa0
; CHECK-NEXT: vmfgt.vf v26, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, double %b, i32 0
@@ -2043,7 +1985,6 @@
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmfgt.vf v25, v8, fa0
; CHECK-NEXT: vmflt.vf v26, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, double %b, i32 0
@@ -2080,7 +2021,6 @@
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmfeq.vv v25, v16, v16
; CHECK-NEXT: vmfeq.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmand.mm v0, v26, v25
; CHECK-NEXT: ret
%vc = fcmp ord %va, %vb
@@ -2094,7 +2034,6 @@
; CHECK-NEXT: vfmv.v.f v16, fa0
; CHECK-NEXT: vmfeq.vf v25, v16, fa0
; CHECK-NEXT: vmfeq.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmand.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, double %b, i32 0
@@ -2110,7 +2049,6 @@
; CHECK-NEXT: vfmv.v.f v16, fa0
; CHECK-NEXT: vmfeq.vf v25, v16, fa0
; CHECK-NEXT: vmfeq.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmand.mm v0, v25, v26
; CHECK-NEXT: ret
%head = insertelement undef, double %b, i32 0
@@ -2125,7 +2063,6 @@
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmfeq.vv v25, v16, v16
; CHECK-NEXT: vmfeq.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmand.mm v0, v26, v25
; CHECK-NEXT: ret
%vc = fcmp ord %va, %vb
@@ -2139,7 +2076,6 @@
; CHECK-NEXT: vfmv.v.f v16, fa0
; CHECK-NEXT: vmfeq.vf v25, v16, fa0
; CHECK-NEXT: vmfeq.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmand.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, double %b, i32 0
@@ -2154,7 +2090,6 @@
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmflt.vv v25, v8, v16
; CHECK-NEXT: vmflt.vv v26, v16, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnor.mm v0, v26, v25
; CHECK-NEXT: ret
%vc = fcmp ueq %va, %vb
@@ -2167,7 +2102,6 @@
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmflt.vf v25, v8, fa0
; CHECK-NEXT: vmfgt.vf v26, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnor.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, double %b, i32 0
@@ -2182,7 +2116,6 @@
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmfgt.vf v25, v8, fa0
; CHECK-NEXT: vmflt.vf v26, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnor.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, double %b, i32 0
@@ -2218,7 +2151,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmfle.vv v25, v8, v16
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%vc = fcmp ugt %va, %vb
@@ -2230,7 +2162,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmfle.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, double %b, i32 0
@@ -2244,7 +2175,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmfge.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, double %b, i32 0
@@ -2280,7 +2210,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmflt.vv v25, v8, v16
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%vc = fcmp uge %va, %vb
@@ -2292,7 +2221,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmflt.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, double %b, i32 0
@@ -2306,7 +2234,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmfgt.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, double %b, i32 0
@@ -2342,7 +2269,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmfle.vv v25, v16, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%vc = fcmp ult %va, %vb
@@ -2354,7 +2280,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmfge.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, double %b, i32 0
@@ -2368,7 +2293,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmfle.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, double %b, i32 0
@@ -2404,7 +2328,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmflt.vv v25, v16, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%vc = fcmp ule %va, %vb
@@ -2416,7 +2339,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmfgt.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, double %b, i32 0
@@ -2430,7 +2352,6 @@
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmflt.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v0, v25, v25
; CHECK-NEXT: ret
%head = insertelement undef, double %b, i32 0
@@ -2523,7 +2444,6 @@
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmfne.vv v25, v16, v16
; CHECK-NEXT: vmfne.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
%vc = fcmp uno %va, %vb
@@ -2537,7 +2457,6 @@
; CHECK-NEXT: vfmv.v.f v16, fa0
; CHECK-NEXT: vmfne.vf v25, v16, fa0
; CHECK-NEXT: vmfne.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, double %b, i32 0
@@ -2553,7 +2472,6 @@
; CHECK-NEXT: vfmv.v.f v16, fa0
; CHECK-NEXT: vmfne.vf v25, v16, fa0
; CHECK-NEXT: vmfne.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v25, v26
; CHECK-NEXT: ret
%head = insertelement undef, double %b, i32 0
@@ -2568,7 +2486,6 @@
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
; CHECK-NEXT: vmfne.vv v25, v16, v16
; CHECK-NEXT: vmfne.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
%vc = fcmp uno %va, %vb
@@ -2582,7 +2499,6 @@
; CHECK-NEXT: vfmv.v.f v16, fa0
; CHECK-NEXT: vmfne.vf v25, v16, fa0
; CHECK-NEXT: vmfne.vv v26, v8, v8
-; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v0, v26, v25
; CHECK-NEXT: ret
%head = insertelement undef, double %b, i32 0
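A note on the hunks above: each one drops the same redundant toggle. The floating-point compares write a mask register under the compare's SEW/LMUL, and the mask logical op that consumes it (vmand.mm, vmor.mm, vmnor.mm, vmnand.mm) is now known to depend only on VL, so no intervening vsetvli is needed. A reduced sketch of one such test follows; the RUN line and function name are assumptions rather than copied from the patch, while the CHECK lines are taken from the first hunk above:

; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v \
; RUN:   -verify-machineinstrs < %s | FileCheck %s

define <vscale x 8 x i1> @fcmp_ord_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
; CHECK-NEXT:    vmfeq.vv v25, v10, v10
; CHECK-NEXT:    vmfeq.vv v26, v8, v8
; CHECK-NEXT:    vmand.mm v0, v26, v25
; CHECK-NEXT:    ret
  %vc = fcmp ord <vscale x 8 x half> %va, %vb
  ret <vscale x 8 x i1> %vc
}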
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll
@@ -1266,7 +1266,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1315,7 +1315,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1364,7 +1364,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1413,7 +1413,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v10
; CHECK-NEXT: ret
entry:
@@ -1462,7 +1462,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v12
; CHECK-NEXT: ret
entry:
@@ -1511,7 +1511,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1560,7 +1560,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1609,7 +1609,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v10
; CHECK-NEXT: ret
entry:
@@ -1658,7 +1658,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v12
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll
@@ -1266,7 +1266,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1315,7 +1315,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1364,7 +1364,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1413,7 +1413,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v10
; CHECK-NEXT: ret
entry:
@@ -1462,7 +1462,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v12
; CHECK-NEXT: ret
entry:
@@ -1511,7 +1511,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1560,7 +1560,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1609,7 +1609,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v10
; CHECK-NEXT: ret
entry:
@@ -1658,7 +1658,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v12
; CHECK-NEXT: ret
entry:
@@ -1707,7 +1707,7 @@
; CHECK-NEXT: vsetvli zero, a1, e64,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1756,7 +1756,7 @@
; CHECK-NEXT: vsetvli zero, a1, e64,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v10
; CHECK-NEXT: ret
entry:
@@ -1805,7 +1805,7 @@
; CHECK-NEXT: vsetvli zero, a1, e64,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v12
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll
@@ -1266,7 +1266,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1315,7 +1315,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1364,7 +1364,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1413,7 +1413,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v10
; CHECK-NEXT: ret
entry:
@@ -1462,7 +1462,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v12
; CHECK-NEXT: ret
entry:
@@ -1511,7 +1511,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1560,7 +1560,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1609,7 +1609,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v10
; CHECK-NEXT: ret
entry:
@@ -1658,7 +1658,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v12
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll
@@ -1266,7 +1266,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1315,7 +1315,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1364,7 +1364,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1413,7 +1413,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v10
; CHECK-NEXT: ret
entry:
@@ -1462,7 +1462,7 @@
; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v12
; CHECK-NEXT: ret
entry:
@@ -1511,7 +1511,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1560,7 +1560,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1609,7 +1609,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v10
; CHECK-NEXT: ret
entry:
@@ -1658,7 +1658,7 @@
; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v12
; CHECK-NEXT: ret
entry:
@@ -1707,7 +1707,7 @@
; CHECK-NEXT: vsetvli zero, a1, e64,m1,tu,mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v9
; CHECK-NEXT: ret
entry:
@@ -1756,7 +1756,7 @@
; CHECK-NEXT: vsetvli zero, a1, e64,m2,tu,mu
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v10
; CHECK-NEXT: ret
entry:
@@ -1805,7 +1805,7 @@
; CHECK-NEXT: vsetvli zero, a1, e64,m4,tu,mu
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
; CHECK-NEXT: vmxor.mm v0, v25, v12
; CHECK-NEXT: ret
entry:
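A closing cross-check on the vmsge/vmsgeu hunks: the masked forms still select vmslt/vmsltu plus vmxor.mm, but the vsetvli feeding the vmxor.mm now encodes the mask type, i.e. SEW=8 with the LMUL that preserves the compare's SEW/LMUL ratio (e16,mf4 becomes e8,mf8; e32,m4 becomes e8,m1; e64,m4 becomes e8,mf2). A reduced sketch of one such case; the RUN line, intrinsic declaration, and function name follow the usual pattern of these test files and are assumptions here, while the CHECK lines come from the corresponding hunk above:

; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s

declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
  <vscale x 4 x i1>, <vscale x 4 x i16>, i16, <vscale x 4 x i1>, i32)

define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %maskedoff, <vscale x 4 x i16> %op1, i16 %op2, <vscale x 4 x i1> %mask, i32 %vl) {
; The compare runs at e16,m1; the mask xor now runs at e8,mf2, which has
; the same SEW/LMUL ratio (16 / 1 = 8 / (1/2)).
; CHECK:         vsetvli zero, a1, e16,m1,tu,mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8,mf2,ta,mu
; CHECK-NEXT:    vmxor.mm v0, v25, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
      <vscale x 4 x i1> %maskedoff, <vscale x 4 x i16> %op1, i16 %op2,
      <vscale x 4 x i1> %mask, i32 %vl)
  ret <vscale x 4 x i1> %a
}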