diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -82,6 +82,11 @@
   // explicit operand. Used by RVV Pseudos.
   HasVecPolicyOpShift = HasVLOpShift + 1,
   HasVecPolicyOpMask = 1 << HasVecPolicyOpShift,
+
+  // Is this instruction a vector widening reduction instruction? Used by RVV
+  // Pseudos.
+  IsRVVWideningReductionShift = HasVecPolicyOpShift + 1,
+  IsRVVWideningReductionMask = 1 << IsRVVWideningReductionShift,
 };

 // Match with the definitions in RISCVInstrFormatsV.td
@@ -146,6 +151,10 @@
 static inline bool hasVecPolicyOp(uint64_t TSFlags) {
   return TSFlags & HasVecPolicyOpMask;
 }
+/// \returns true if it is a vector widening reduction instruction.
+static inline bool isRVVWideningReduction(uint64_t TSFlags) {
+  return TSFlags & IsRVVWideningReductionMask;
+}

 // RISC-V Specific Machine Operand Flags
 enum {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
--- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -181,6 +181,9 @@

   bit HasVecPolicyOp = 0;
   let TSFlags{16} = HasVecPolicyOp;
+
+  bit IsRVVWideningReduction = 0;
+  let TSFlags{17} = IsRVVWideningReduction;
 }

 // Pseudo instructions
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -36,6 +36,10 @@
 #define GET_INSTRINFO_CTOR_DTOR
 #include "RISCVGenInstrInfo.inc"

+static cl::opt<bool> PreferWholeRegisterMove(
+    "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
+    cl::desc("Prefer whole register move for vector registers."));
+
 namespace llvm {
 namespace RISCVVPseudosTable {

@@ -116,6 +120,136 @@
   return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
 }

+static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
+                                   const MachineBasicBlock &MBB,
+                                   MachineBasicBlock::const_iterator MBBI,
+                                   MachineBasicBlock::const_iterator &DefMBBI,
+                                   RISCVII::VLMUL LMul) {
+  if (PreferWholeRegisterMove)
+    return false;
+
+  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
+         "Unexpected COPY instruction.");
+  Register SrcReg = MBBI->getOperand(1).getReg();
+  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
+
+  bool FoundDef = false;
+  bool FirstVSetVLI = false;
+  unsigned FirstSEW = 0;
+  while (MBBI != MBB.begin()) {
+    --MBBI;
+    if (MBBI->isMetaInstruction())
+      continue;
+
+    if (MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
+        MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
+        MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
+      // There is a vsetvli between the COPY and the source define instruction:
+      //   vy = def_vop ...  (producing instruction)
+      //   ...
+      //   vsetvli
+      //   ...
+      //   vx = COPY vy
+      if (!FoundDef) {
+        if (!FirstVSetVLI) {
+          FirstVSetVLI = true;
+          unsigned FirstVType = MBBI->getOperand(2).getImm();
+          RISCVII::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType);
+          FirstSEW = RISCVVType::getSEW(FirstVType);
+          // The first encountered vsetvli must have the same LMUL as the
+          // register class of the COPY.
+          if (FirstLMul != LMul)
+            return false;
+        }
+        // Only permit `vsetvli x0, x0, vtype` between the COPY and the source
+        // define instruction.
+        if (MBBI->getOperand(0).getReg() != RISCV::X0)
+          return false;
+        if (MBBI->getOperand(1).isImm())
+          return false;
+        if (MBBI->getOperand(1).getReg() != RISCV::X0)
+          return false;
+        continue;
+      }
+
+      // MBBI is the first vsetvli before the producing instruction.
+      unsigned VType = MBBI->getOperand(2).getImm();
+      // If there is a vsetvli between the COPY and the producing instruction,
+      // the SEW must match the first vsetvli we encountered.
+      if (FirstVSetVLI) {
+        // If SEW is different, return false.
+        if (RISCVVType::getSEW(VType) != FirstSEW)
+          return false;
+      }
+
+      // If the vsetvli is tail undisturbed, keep the whole register move.
+      if (!RISCVVType::isTailAgnostic(VType))
+        return false;
+
+      // This check is conservative. We only have register classes for
+      // LMUL = 1/2/4/8. We should be able to convert vmv1r.v to vmv.v.v
+      // for fractional LMUL operations. However, we cannot use the vsetvli
+      // LMUL for widening operations, because the result of a widening
+      // operation is 2 x LMUL.
+      return LMul == RISCVVType::getVLMUL(VType);
+    } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
+      return false;
+    } else if (MBBI->getNumDefs()) {
+      // Check all instructions that may change VL.
+      // For example, vleff has an implicit def of VL.
+      if (MBBI->modifiesRegister(RISCV::VL))
+        return false;
+
+      // Go through all defined operands, including implicit defines.
+      for (const MachineOperand &MO : MBBI->operands()) {
+        if (!MO.isReg() || !MO.isDef())
+          continue;
+        if (!FoundDef && TRI->isSubRegisterEq(MO.getReg(), SrcReg)) {
+          // We only permit the source of the COPY to have the same LMUL as the
+          // defined operand. There are cases where we need to keep the whole
+          // register copy if the LMUL is different.
+          // For example,
+          //   $x0 = PseudoVSETIVLI 4, 73   // vsetivli zero, 4, e16, m2, ta, mu
+          //   $v28m4 = PseudoVWADD_VV_M2 $v26m2, $v8m2
+          //   # The COPY may be created by a vlmul_trunc intrinsic.
+          //   $v26m2 = COPY renamable $v28m2, implicit killed $v28m4
+          //
+          // After widening, the valid value will be 4 x e32 elements. If we
+          // convert the COPY to vmv.v.v, it will only copy 4 x e16 elements.
+          // FIXME: A COPY of a subregister of a Zvlsseg register cannot be
+          // converted to vmv.v.[v|i] under this constraint.
+          if (MO.getReg() != SrcReg)
+            return false;
+
+          // For widening reduction instructions with an LMUL_1 input vector,
+          // checking only the LMUL is insufficient because the reduction
+          // result is always LMUL_1.
+          // For example,
+          //   $x11 = PseudoVSETIVLI 1, 64   // vsetivli a1, 1, e8, m1, ta, mu
+          //   $v8m1 = PseudoVWREDSUM_VS_M1 $v26, $v27
+          //   $v26 = COPY killed renamable $v8
+          // After widening, the valid value will be 1 x e16 elements. If we
+          // convert the COPY to vmv.v.v, it will only copy 1 x e8 elements.
+          uint64_t TSFlags = MBBI->getDesc().TSFlags;
+          if (RISCVII::isRVVWideningReduction(TSFlags))
+            return false;
+
+          // Found the definition.
+          FoundDef = true;
+          DefMBBI = MBBI;
+          // If the producing instruction does not depend on vsetvli, do not
+          // convert the COPY to vmv.v.v. For example, VL1R_V or PseudoVRELOAD.
+          if (!RISCVII::hasSEWOp(TSFlags))
+            return false;
+          break;
+        }
+      }
+    }
+  }
+
+  return false;
+}
+
 void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MBBI,
                                  const DebugLoc &DL, MCRegister DstReg,
@@ -131,7 +265,7 @@
   unsigned Opc;
   bool IsScalableVector = true;
   unsigned NF = 1;
-  unsigned LMul = 1;
+  RISCVII::VLMUL LMul = RISCVII::LMUL_1;
   unsigned SubRegIdx = RISCV::sub_vrm1_0;
   if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
     Opc = RISCV::FSGNJ_H;
@@ -144,91 +278,157 @@
     IsScalableVector = false;
   } else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
     Opc = RISCV::PseudoVMV1R_V;
+    LMul = RISCVII::LMUL_1;
   } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
     Opc = RISCV::PseudoVMV2R_V;
+    LMul = RISCVII::LMUL_2;
   } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
     Opc = RISCV::PseudoVMV4R_V;
+    LMul = RISCVII::LMUL_4;
   } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
     Opc = RISCV::PseudoVMV8R_V;
+    LMul = RISCVII::LMUL_8;
   } else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
     Opc = RISCV::PseudoVMV1R_V;
     SubRegIdx = RISCV::sub_vrm1_0;
     NF = 2;
-    LMul = 1;
+    LMul = RISCVII::LMUL_1;
   } else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
     Opc = RISCV::PseudoVMV2R_V;
     SubRegIdx = RISCV::sub_vrm2_0;
     NF = 2;
-    LMul = 2;
+    LMul = RISCVII::LMUL_2;
   } else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
     Opc = RISCV::PseudoVMV4R_V;
     SubRegIdx = RISCV::sub_vrm4_0;
     NF = 2;
-    LMul = 4;
+    LMul = RISCVII::LMUL_4;
   } else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
     Opc = RISCV::PseudoVMV1R_V;
     SubRegIdx = RISCV::sub_vrm1_0;
     NF = 3;
-    LMul = 1;
+    LMul = RISCVII::LMUL_1;
   } else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
     Opc = RISCV::PseudoVMV2R_V;
     SubRegIdx = RISCV::sub_vrm2_0;
     NF = 3;
-    LMul = 2;
+    LMul = RISCVII::LMUL_2;
   } else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
     Opc = RISCV::PseudoVMV1R_V;
     SubRegIdx = RISCV::sub_vrm1_0;
     NF = 4;
-    LMul = 1;
+    LMul = RISCVII::LMUL_1;
   } else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
     Opc = RISCV::PseudoVMV2R_V;
     SubRegIdx = RISCV::sub_vrm2_0;
     NF = 4;
-    LMul = 2;
+    LMul = RISCVII::LMUL_2;
   } else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
     Opc = RISCV::PseudoVMV1R_V;
     SubRegIdx = RISCV::sub_vrm1_0;
     NF = 5;
-    LMul = 1;
+    LMul = RISCVII::LMUL_1;
   } else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
     Opc = RISCV::PseudoVMV1R_V;
     SubRegIdx = RISCV::sub_vrm1_0;
     NF = 6;
-    LMul = 1;
+    LMul = RISCVII::LMUL_1;
   } else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
     Opc = RISCV::PseudoVMV1R_V;
     SubRegIdx = RISCV::sub_vrm1_0;
     NF = 7;
-    LMul = 1;
+    LMul = RISCVII::LMUL_1;
   } else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
     Opc = RISCV::PseudoVMV1R_V;
     SubRegIdx = RISCV::sub_vrm1_0;
     NF = 8;
-    LMul = 1;
+    LMul = RISCVII::LMUL_1;
   } else {
     llvm_unreachable("Impossible reg-to-reg copy");
   }

   if (IsScalableVector) {
+    bool UseVMV_V_V = false;
+    MachineBasicBlock::const_iterator DefMBBI;
+    unsigned DefExplicitOpNum;
+    unsigned VIOpc;
+    if (isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
+      UseVMV_V_V = true;
+      DefExplicitOpNum = DefMBBI->getNumExplicitOperands();
+      // We only need to handle LMUL = 1/2/4/8 here because we only define
+      // vector register classes for LMUL = 1/2/4/8.
+      switch (LMul) {
+      default:
+        llvm_unreachable("Impossible LMUL for vector register copy.");
+      case RISCVII::LMUL_1:
+        Opc = RISCV::PseudoVMV_V_V_M1;
+        VIOpc = RISCV::PseudoVMV_V_I_M1;
+        break;
+      case RISCVII::LMUL_2:
+        Opc = RISCV::PseudoVMV_V_V_M2;
+        VIOpc = RISCV::PseudoVMV_V_I_M2;
+        break;
+      case RISCVII::LMUL_4:
+        Opc = RISCV::PseudoVMV_V_V_M4;
+        VIOpc = RISCV::PseudoVMV_V_I_M4;
+        break;
+      case RISCVII::LMUL_8:
+        Opc = RISCV::PseudoVMV_V_V_M8;
+        VIOpc = RISCV::PseudoVMV_V_I_M8;
+        break;
+      }
+    }
+
+    bool UseVMV_V_I = false;
+    if (UseVMV_V_V && (DefMBBI->getOpcode() == VIOpc)) {
+      UseVMV_V_I = true;
+      Opc = VIOpc;
+    }
+
     if (NF == 1) {
-      BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
-          .addReg(SrcReg, getKillRegState(KillSrc));
+      auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), DstReg);
+      if (UseVMV_V_I)
+        MIB = MIB.add(DefMBBI->getOperand(1));
+      else
+        MIB = MIB.addReg(SrcReg, getKillRegState(KillSrc));
+      if (UseVMV_V_V) {
+        // The last two explicit arguments of these vector instructions are
+        // AVL and SEW. We also need to append the implicit-use operands vl
+        // and vtype.
+        MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 2)); // AVL
+        MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 1)); // SEW
+        MIB.addReg(RISCV::VL, RegState::Implicit);
+        MIB.addReg(RISCV::VTYPE, RegState::Implicit);
+      }
     } else {
       const TargetRegisterInfo *TRI = STI.getRegisterInfo();
       int I = 0, End = NF, Incr = 1;
       unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
       unsigned DstEncoding = TRI->getEncodingValue(DstReg);
-      if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMul)) {
+      unsigned LMulVal;
+      bool Fractional;
+      std::tie(LMulVal, Fractional) = RISCVVType::decodeVLMUL(LMul);
+      assert(!Fractional && "It is impossible to have a fractional LMUL here.");
+      if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMulVal)) {
        I = NF - 1;
        End = -1;
        Incr = -1;
      }
      for (; I != End; I += Incr) {
-        BuildMI(MBB, MBBI, DL, get(Opc), TRI->getSubReg(DstReg, SubRegIdx + I))
-            .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
-                    getKillRegState(KillSrc));
+        auto MIB = BuildMI(MBB, MBBI, DL, get(Opc),
+                           TRI->getSubReg(DstReg, SubRegIdx + I));
+        if (UseVMV_V_I)
+          MIB = MIB.add(DefMBBI->getOperand(1));
+        else
+          MIB = MIB.addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
+                           getKillRegState(KillSrc));
+        if (UseVMV_V_V) {
+          MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 2)); // AVL
+          MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 1)); // SEW
+          MIB.addReg(RISCV::VL, RegState::Implicit);
+          MIB.addReg(RISCV::VTYPE, RegState::Implicit);
+        }
      }
    }
  } else {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -3972,8 +3972,10 @@
 //===----------------------------------------------------------------------===//
 // 15.2. Vector Widening Integer Reduction Instructions
 //===----------------------------------------------------------------------===//
+let IsRVVWideningReduction = 1 in {
 defm PseudoVWREDSUMU : VPseudoReductionV_VS;
 defm PseudoVWREDSUM : VPseudoReductionV_VS;
+}
 } // Predicates = [HasVInstructions]

 let Predicates = [HasVInstructionsAnyF] in {
@@ -3988,8 +3990,10 @@
 //===----------------------------------------------------------------------===//
 // 15.4.
Vector Widening Floating-Point Reduction Instructions //===----------------------------------------------------------------------===// +let IsRVVWideningReduction = 1 in { defm PseudoVFWREDUSUM : VPseudoReductionV_VS; defm PseudoVFWREDOSUM : VPseudoReductionV_VS; +} } // Predicates = [HasVInstructionsAnyF] diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll @@ -561,7 +561,7 @@ ; RV32-NEXT: addi a0, a0, 16 ; RV32-NEXT: vs8r.v v8, (a0) ; RV32-NEXT: mv a0, zero -; RV32-NEXT: vmv8r.v v16, v8 +; RV32-NEXT: vmv.v.i v16, 0 ; RV32-NEXT: call vector_arg_indirect_stack@plt ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 @@ -613,7 +613,7 @@ ; RV64-NEXT: addi a0, a0, 24 ; RV64-NEXT: vs8r.v v8, (a0) ; RV64-NEXT: mv a0, zero -; RV64-NEXT: vmv8r.v v16, v8 +; RV64-NEXT: vmv.v.i v16, 0 ; RV64-NEXT: call vector_arg_indirect_stack@plt ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 5 diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll @@ -52,7 +52,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; RV32-NEXT: vmv.v.i v8, 0 ; RV32-NEXT: addi a0, sp, 32 -; RV32-NEXT: vmv8r.v v16, v8 +; RV32-NEXT: vmv.v.i v16, 0 ; RV32-NEXT: call callee_scalable_vector_split_indirect@plt ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 @@ -80,7 +80,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; RV64-NEXT: vmv.v.i v8, 0 ; RV64-NEXT: addi a0, sp, 24 -; RV64-NEXT: vmv8r.v v16, v8 +; RV64-NEXT: vmv.v.i v16, 0 ; RV64-NEXT: call callee_scalable_vector_split_indirect@plt ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll @@ -299,7 +299,7 @@ ; LMULMAX8-NEXT: addi a2, zero, 42 ; LMULMAX8-NEXT: addi a3, sp, 128 ; LMULMAX8-NEXT: vse32.v v8, (a3) -; LMULMAX8-NEXT: vmv8r.v v8, v24 +; LMULMAX8-NEXT: vmv.v.v v8, v24 ; LMULMAX8-NEXT: call ext3@plt ; LMULMAX8-NEXT: addi sp, s0, -384 ; LMULMAX8-NEXT: ld s0, 368(sp) # 8-byte Folded Reload @@ -328,8 +328,8 @@ ; LMULMAX4-NEXT: addi a3, zero, 42 ; LMULMAX4-NEXT: addi a1, sp, 128 ; LMULMAX4-NEXT: vse32.v v8, (a1) -; LMULMAX4-NEXT: vmv4r.v v8, v24 -; LMULMAX4-NEXT: vmv4r.v v12, v28 +; LMULMAX4-NEXT: vmv.v.v v8, v24 +; LMULMAX4-NEXT: vmv.v.v v12, v28 ; LMULMAX4-NEXT: call ext3@plt ; LMULMAX4-NEXT: addi sp, s0, -384 ; LMULMAX4-NEXT: ld s0, 368(sp) # 8-byte Folded Reload @@ -393,7 +393,7 @@ ; LMULMAX8-NEXT: addi a0, sp, 128 ; LMULMAX8-NEXT: vse32.v v8, (a0) ; LMULMAX8-NEXT: mv a0, zero -; LMULMAX8-NEXT: vmv8r.v v16, v8 +; LMULMAX8-NEXT: vmv.v.i v16, 0 ; LMULMAX8-NEXT: call vector_arg_indirect_stack@plt ; LMULMAX8-NEXT: addi sp, s0, -384 ; LMULMAX8-NEXT: ld s0, 368(sp) # 8-byte Folded Reload @@ -428,9 +428,9 @@ ; LMULMAX4-NEXT: addi a0, sp, 128 ; LMULMAX4-NEXT: vse32.v v8, (a0) ; LMULMAX4-NEXT: mv a0, zero -; LMULMAX4-NEXT: vmv4r.v v12, v8 -; LMULMAX4-NEXT: vmv4r.v v16, v8 -; LMULMAX4-NEXT: vmv4r.v v20, v8 +; LMULMAX4-NEXT: vmv.v.i v12, 0 +; LMULMAX4-NEXT: vmv.v.i v16, 0 +; LMULMAX4-NEXT: vmv.v.i v20, 0 ; LMULMAX4-NEXT: call vector_arg_indirect_stack@plt ; 
LMULMAX4-NEXT: addi sp, s0, -384 ; LMULMAX4-NEXT: ld s0, 368(sp) # 8-byte Folded Reload @@ -506,7 +506,7 @@ ; LMULMAX8-NEXT: addi t6, zero, 12 ; LMULMAX8-NEXT: sd a0, 0(sp) ; LMULMAX8-NEXT: mv a0, zero -; LMULMAX8-NEXT: vmv8r.v v16, v8 +; LMULMAX8-NEXT: vmv.v.i v16, 0 ; LMULMAX8-NEXT: call vector_arg_direct_stack@plt ; LMULMAX8-NEXT: ld ra, 152(sp) # 8-byte Folded Reload ; LMULMAX8-NEXT: addi sp, sp, 160 @@ -541,9 +541,9 @@ ; LMULMAX4-NEXT: addi t6, zero, 12 ; LMULMAX4-NEXT: vse32.v v8, (a0) ; LMULMAX4-NEXT: mv a0, zero -; LMULMAX4-NEXT: vmv4r.v v12, v8 -; LMULMAX4-NEXT: vmv4r.v v16, v8 -; LMULMAX4-NEXT: vmv4r.v v20, v8 +; LMULMAX4-NEXT: vmv.v.i v12, 0 +; LMULMAX4-NEXT: vmv.v.i v16, 0 +; LMULMAX4-NEXT: vmv.v.i v20, 0 ; LMULMAX4-NEXT: call vector_arg_direct_stack@plt ; LMULMAX4-NEXT: ld ra, 152(sp) # 8-byte Folded Reload ; LMULMAX4-NEXT: addi sp, sp, 160 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll @@ -798,7 +798,7 @@ ; LMULMAX8-NEXT: addi a2, zero, 42 ; LMULMAX8-NEXT: addi a3, sp, 128 ; LMULMAX8-NEXT: vse32.v v8, (a3) -; LMULMAX8-NEXT: vmv8r.v v8, v24 +; LMULMAX8-NEXT: vmv.v.v v8, v24 ; LMULMAX8-NEXT: call ext3@plt ; LMULMAX8-NEXT: addi sp, s0, -384 ; LMULMAX8-NEXT: ld s0, 368(sp) # 8-byte Folded Reload @@ -827,8 +827,8 @@ ; LMULMAX4-NEXT: addi a3, zero, 42 ; LMULMAX4-NEXT: addi a1, sp, 128 ; LMULMAX4-NEXT: vse32.v v8, (a1) -; LMULMAX4-NEXT: vmv4r.v v8, v24 -; LMULMAX4-NEXT: vmv4r.v v12, v28 +; LMULMAX4-NEXT: vmv.v.v v8, v24 +; LMULMAX4-NEXT: vmv.v.v v12, v28 ; LMULMAX4-NEXT: call ext3@plt ; LMULMAX4-NEXT: addi sp, s0, -384 ; LMULMAX4-NEXT: ld s0, 368(sp) # 8-byte Folded Reload @@ -865,10 +865,10 @@ ; LMULMAX2-NEXT: addi a5, zero, 42 ; LMULMAX2-NEXT: addi a1, sp, 128 ; LMULMAX2-NEXT: vse32.v v8, (a1) -; LMULMAX2-NEXT: vmv2r.v v8, v24 -; LMULMAX2-NEXT: vmv2r.v v10, v26 -; LMULMAX2-NEXT: vmv2r.v v12, v28 -; LMULMAX2-NEXT: vmv2r.v v14, v30 +; LMULMAX2-NEXT: vmv.v.v v8, v24 +; LMULMAX2-NEXT: vmv.v.v v10, v26 +; LMULMAX2-NEXT: vmv.v.v v12, v28 +; LMULMAX2-NEXT: vmv.v.v v14, v30 ; LMULMAX2-NEXT: call ext3@plt ; LMULMAX2-NEXT: addi sp, s0, -384 ; LMULMAX2-NEXT: ld s0, 368(sp) # 8-byte Folded Reload @@ -924,14 +924,14 @@ ; LMULMAX1-NEXT: addi a0, sp, 128 ; LMULMAX1-NEXT: addi a1, sp, 128 ; LMULMAX1-NEXT: vse32.v v8, (a1) -; LMULMAX1-NEXT: vmv1r.v v8, v24 -; LMULMAX1-NEXT: vmv1r.v v9, v25 -; LMULMAX1-NEXT: vmv1r.v v10, v26 -; LMULMAX1-NEXT: vmv1r.v v11, v27 -; LMULMAX1-NEXT: vmv1r.v v12, v28 -; LMULMAX1-NEXT: vmv1r.v v13, v29 -; LMULMAX1-NEXT: vmv1r.v v14, v30 -; LMULMAX1-NEXT: vmv1r.v v15, v31 +; LMULMAX1-NEXT: vmv.v.v v8, v24 +; LMULMAX1-NEXT: vmv.v.v v9, v25 +; LMULMAX1-NEXT: vmv.v.v v10, v26 +; LMULMAX1-NEXT: vmv.v.v v11, v27 +; LMULMAX1-NEXT: vmv.v.v v12, v28 +; LMULMAX1-NEXT: vmv.v.v v13, v29 +; LMULMAX1-NEXT: vmv.v.v v14, v30 +; LMULMAX1-NEXT: vmv.v.v v15, v31 ; LMULMAX1-NEXT: call ext3@plt ; LMULMAX1-NEXT: addi sp, s0, -384 ; LMULMAX1-NEXT: ld s0, 368(sp) # 8-byte Folded Reload @@ -1109,7 +1109,7 @@ ; LMULMAX2-NEXT: vmv1r.v v10, v8 ; LMULMAX2-NEXT: vmv1r.v v11, v8 ; LMULMAX2-NEXT: vmv1r.v v12, v8 -; LMULMAX2-NEXT: vmv2r.v v22, v14 +; LMULMAX2-NEXT: vmv.v.v v22, v14 ; LMULMAX2-NEXT: call split_vector_args@plt ; LMULMAX2-NEXT: addi sp, s0, -256 ; LMULMAX2-NEXT: ld s0, 240(sp) # 8-byte Folded Reload @@ -1161,9 +1161,9 @@ ; LMULMAX1-NEXT: vmv1r.v v10, v8 ; LMULMAX1-NEXT: vmv1r.v v11, 
v8 ; LMULMAX1-NEXT: vmv1r.v v12, v8 -; LMULMAX1-NEXT: vmv1r.v v21, v13 -; LMULMAX1-NEXT: vmv1r.v v22, v14 -; LMULMAX1-NEXT: vmv1r.v v23, v15 +; LMULMAX1-NEXT: vmv.v.v v21, v13 +; LMULMAX1-NEXT: vmv.v.v v22, v14 +; LMULMAX1-NEXT: vmv.v.v v23, v15 ; LMULMAX1-NEXT: call split_vector_args@plt ; LMULMAX1-NEXT: addi sp, s0, -256 ; LMULMAX1-NEXT: ld s0, 240(sp) # 8-byte Folded Reload @@ -1282,7 +1282,7 @@ ; LMULMAX8-NEXT: addi a7, zero, 7 ; LMULMAX8-NEXT: sd a0, 128(sp) ; LMULMAX8-NEXT: mv a0, zero -; LMULMAX8-NEXT: vmv8r.v v16, v8 +; LMULMAX8-NEXT: vmv.v.i v16, 0 ; LMULMAX8-NEXT: call vector_arg_via_stack@plt ; LMULMAX8-NEXT: ld ra, 136(sp) # 8-byte Folded Reload ; LMULMAX8-NEXT: addi sp, sp, 144 @@ -1309,9 +1309,9 @@ ; LMULMAX4-NEXT: addi a7, zero, 7 ; LMULMAX4-NEXT: vse32.v v8, (a0) ; LMULMAX4-NEXT: mv a0, zero -; LMULMAX4-NEXT: vmv4r.v v12, v8 -; LMULMAX4-NEXT: vmv4r.v v16, v8 -; LMULMAX4-NEXT: vmv4r.v v20, v8 +; LMULMAX4-NEXT: vmv.v.i v12, 0 +; LMULMAX4-NEXT: vmv.v.i v16, 0 +; LMULMAX4-NEXT: vmv.v.i v20, 0 ; LMULMAX4-NEXT: call vector_arg_via_stack@plt ; LMULMAX4-NEXT: ld ra, 136(sp) # 8-byte Folded Reload ; LMULMAX4-NEXT: addi sp, sp, 144 @@ -1342,13 +1342,13 @@ ; LMULMAX2-NEXT: addi a7, zero, 7 ; LMULMAX2-NEXT: vse32.v v8, (a0) ; LMULMAX2-NEXT: mv a0, zero -; LMULMAX2-NEXT: vmv2r.v v10, v8 -; LMULMAX2-NEXT: vmv2r.v v12, v8 -; LMULMAX2-NEXT: vmv2r.v v14, v8 -; LMULMAX2-NEXT: vmv2r.v v16, v8 -; LMULMAX2-NEXT: vmv2r.v v18, v8 -; LMULMAX2-NEXT: vmv2r.v v20, v8 -; LMULMAX2-NEXT: vmv2r.v v22, v8 +; LMULMAX2-NEXT: vmv.v.i v10, 0 +; LMULMAX2-NEXT: vmv.v.i v12, 0 +; LMULMAX2-NEXT: vmv.v.i v14, 0 +; LMULMAX2-NEXT: vmv.v.i v16, 0 +; LMULMAX2-NEXT: vmv.v.i v18, 0 +; LMULMAX2-NEXT: vmv.v.i v20, 0 +; LMULMAX2-NEXT: vmv.v.i v22, 0 ; LMULMAX2-NEXT: call vector_arg_via_stack@plt ; LMULMAX2-NEXT: ld ra, 136(sp) # 8-byte Folded Reload ; LMULMAX2-NEXT: addi sp, sp, 144 @@ -1387,21 +1387,21 @@ ; LMULMAX1-NEXT: addi a7, zero, 7 ; LMULMAX1-NEXT: vse32.v v8, (a0) ; LMULMAX1-NEXT: mv a0, zero -; LMULMAX1-NEXT: vmv1r.v v9, v8 -; LMULMAX1-NEXT: vmv1r.v v10, v8 -; LMULMAX1-NEXT: vmv1r.v v11, v8 -; LMULMAX1-NEXT: vmv1r.v v12, v8 -; LMULMAX1-NEXT: vmv1r.v v13, v8 -; LMULMAX1-NEXT: vmv1r.v v14, v8 -; LMULMAX1-NEXT: vmv1r.v v15, v8 -; LMULMAX1-NEXT: vmv1r.v v16, v8 -; LMULMAX1-NEXT: vmv1r.v v17, v8 -; LMULMAX1-NEXT: vmv1r.v v18, v8 -; LMULMAX1-NEXT: vmv1r.v v19, v8 -; LMULMAX1-NEXT: vmv1r.v v20, v8 -; LMULMAX1-NEXT: vmv1r.v v21, v8 -; LMULMAX1-NEXT: vmv1r.v v22, v8 -; LMULMAX1-NEXT: vmv1r.v v23, v8 +; LMULMAX1-NEXT: vmv.v.i v9, 0 +; LMULMAX1-NEXT: vmv.v.i v10, 0 +; LMULMAX1-NEXT: vmv.v.i v11, 0 +; LMULMAX1-NEXT: vmv.v.i v12, 0 +; LMULMAX1-NEXT: vmv.v.i v13, 0 +; LMULMAX1-NEXT: vmv.v.i v14, 0 +; LMULMAX1-NEXT: vmv.v.i v15, 0 +; LMULMAX1-NEXT: vmv.v.i v16, 0 +; LMULMAX1-NEXT: vmv.v.i v17, 0 +; LMULMAX1-NEXT: vmv.v.i v18, 0 +; LMULMAX1-NEXT: vmv.v.i v19, 0 +; LMULMAX1-NEXT: vmv.v.i v20, 0 +; LMULMAX1-NEXT: vmv.v.i v21, 0 +; LMULMAX1-NEXT: vmv.v.i v22, 0 +; LMULMAX1-NEXT: vmv.v.i v23, 0 ; LMULMAX1-NEXT: call vector_arg_via_stack@plt ; LMULMAX1-NEXT: ld ra, 136(sp) # 8-byte Folded Reload ; LMULMAX1-NEXT: addi sp, sp, 144 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll @@ -89,7 +89,7 @@ ; RV32-NEXT: vle16.v v12, (a0) ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vrgatherei16.vv v10, v8, v12 -; RV32-NEXT: 
vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_permute_shuffle_vu_v4f64: @@ -99,7 +99,7 @@ ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vle64.v v12, (a0) ; RV64-NEXT: vrgather.vv v10, v8, v12 -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %s = shufflevector <4 x double> %x, <4 x double> undef, <4 x i32> ret <4 x double> %s @@ -114,7 +114,7 @@ ; RV32-NEXT: vle16.v v12, (a0) ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vrgatherei16.vv v10, v8, v12 -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_permute_shuffle_uv_v4f64: @@ -124,7 +124,7 @@ ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vle64.v v12, (a0) ; RV64-NEXT: vrgather.vv v10, v8, v12 -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %s = shufflevector <4 x double> undef, <4 x double> %x, <4 x i32> ret <4 x double> %s @@ -144,7 +144,7 @@ ; RV32-NEXT: vmv.s.x v0, a0 ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vrgather.vi v12, v10, 1, v0.t -; RV32-NEXT: vmv2r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_shuffle_vv_v4f64: @@ -159,7 +159,7 @@ ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vrgather.vi v12, v10, 1, v0.t -; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %s = shufflevector <4 x double> %x, <4 x double> %y, <4 x i32> ret <4 x double> %s @@ -179,7 +179,7 @@ ; RV32-NEXT: vrsub.vi v12, v12, 4 ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vrgatherei16.vv v10, v8, v12, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_shuffle_xv_v4f64: @@ -194,7 +194,7 @@ ; RV64-NEXT: vid.v v12 ; RV64-NEXT: vrsub.vi v12, v12, 4 ; RV64-NEXT: vrgather.vv v10, v8, v12, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %s = shufflevector <4 x double> , <4 x double> %x, <4 x i32> ret <4 x double> %s @@ -214,7 +214,7 @@ ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vlse64.v v10, (a0), zero ; RV32-NEXT: vrgatherei16.vv v10, v8, v12, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_shuffle_vx_v4f64: @@ -230,7 +230,7 @@ ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vlse64.v v10, (a0), zero ; RV64-NEXT: vrgather.vv v10, v8, v12, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %s = shufflevector <4 x double> %x, <4 x double> , <4 x i32> ret <4 x double> %s diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll @@ -145,7 +145,7 @@ ; RV32-NEXT: vle16.v v16, (a0) ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vrgatherei16.vv v12, v8, v16 -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_permute_shuffle_vu_v8i64: @@ -155,7 +155,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vle64.v v16, (a0) ; RV64-NEXT: vrgather.vv v12, v8, v16 -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %s = shufflevector <8 x i64> %x, <8 x i64> undef, <8 x i32> ret <8 x i64> %s @@ -170,7 +170,7 @@ ; RV32-NEXT: vle16.v v16, (a0) ; RV32-NEXT: 
vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vrgatherei16.vv v12, v8, v16 -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_permute_shuffle_uv_v8i64: @@ -180,7 +180,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vle64.v v16, (a0) ; RV64-NEXT: vrgather.vv v12, v8, v16 -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %s = shufflevector <8 x i64> undef, <8 x i64> %x, <8 x i32> ret <8 x i64> %s @@ -206,7 +206,7 @@ ; RV32-NEXT: vmv.s.x v0, a0 ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vrgatherei16.vv v16, v12, v20, v0.t -; RV32-NEXT: vmv4r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_shuffle_vv_v8i64: @@ -227,7 +227,7 @@ ; RV64-NEXT: vmv.s.x v0, a0 ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vrgather.vv v16, v12, v20, v0.t -; RV64-NEXT: vmv4r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %s = shufflevector <8 x i64> %x, <8 x i64> %y, <8 x i32> ret <8 x i64> %s @@ -252,7 +252,7 @@ ; RV32-NEXT: vle16.v v16, (a0) ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vrgatherei16.vv v12, v8, v16, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_shuffle_xv_v8i64: @@ -266,7 +266,7 @@ ; RV64-NEXT: vle64.v v16, (a0) ; RV64-NEXT: vmv.v.i v12, -1 ; RV64-NEXT: vrgather.vv v12, v8, v16, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %s = shufflevector <8 x i64> , <8 x i64> %x, <8 x i32> ret <8 x i64> %s @@ -305,7 +305,7 @@ ; RV64-NEXT: vle64.v v16, (a0) ; RV64-NEXT: vmv.v.i v12, 5 ; RV64-NEXT: vrgather.vv v12, v8, v16, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %s = shufflevector <8 x i64> %x, <8 x i64> , <8 x i32> ret <8 x i64> %s diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll @@ -955,7 +955,7 @@ ; CHECK-NEXT: vmv.v.i v11, 0 ; CHECK-NEXT: lui a1, 1048568 ; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, mu -; CHECK-NEXT: vmv1r.v v12, v11 +; CHECK-NEXT: vmv.v.i v12, 0 ; CHECK-NEXT: vmv.s.x v12, a1 ; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, mu ; CHECK-NEXT: vslideup.vi v11, v9, 6 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll @@ -452,14 +452,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v8i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> %m, <8 x i16> %passthru) ret <8 x i16> %v @@ -473,7 +473,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v10, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i8_v8i16: @@ -483,7 +483,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: 
vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v12, v0.t -; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: vmv.v.v v8, v9 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, <8 x i8> %idxs %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> %m, <8 x i16> %passthru) @@ -498,7 +498,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v10, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i8_v8i16: @@ -508,7 +508,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v12, v0.t -; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: vmv.v.v v8, v9 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %eidxs @@ -524,7 +524,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v10, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i8_v8i16: @@ -534,7 +534,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v12, v0.t -; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: vmv.v.v v8, v9 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %eidxs @@ -550,7 +550,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v10, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i16: @@ -560,7 +560,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v12, v0.t -; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: vmv.v.v v8, v9 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %idxs %v = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, i32 2, <8 x i1> %m, <8 x i16> %passthru) @@ -656,14 +656,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v4i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, i32 4, <4 x i1> %m, <4 x i32> %passthru) ret <4 x i32> %v @@ -680,7 +680,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8 -; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -709,14 +709,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v8i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %v = call <8 x i32> 
@llvm.masked.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru) ret <8 x i32> %v @@ -729,7 +729,7 @@ ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i8_v8i32: @@ -739,7 +739,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, <8 x i8> %idxs %v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru) @@ -753,7 +753,7 @@ ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i8_v8i32: @@ -763,7 +763,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -778,7 +778,7 @@ ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i8_v8i32: @@ -788,7 +788,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -803,7 +803,7 @@ ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i16_v8i32: @@ -813,7 +813,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, <8 x i16> %idxs %v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru) @@ -827,7 +827,7 @@ ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i16_v8i32: @@ -837,7 +837,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -852,7 +852,7 @@ ; RV32-NEXT: vzext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i16_v8i32: @@ -862,7 +862,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), 
v12, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %eidxs @@ -876,7 +876,7 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i32: @@ -886,7 +886,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %idxs %v = call <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, i32 4, <8 x i1> %m, <8 x i32> %passthru) @@ -900,14 +900,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v1i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: vmv.v.v v8, v9 ; RV64-NEXT: ret %v = call <1 x i64> @llvm.masked.gather.v1i64.v1p0i64(<1 x i64*> %ptrs, i32 8, <1 x i1> %m, <1 x i64> %passthru) ret <1 x i64> %v @@ -920,14 +920,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: vmv.v.v v8, v9 ; RV64-NEXT: ret %v = call <2 x i64> @llvm.masked.gather.v2i64.v2p0i64(<2 x i64*> %ptrs, i32 8, <2 x i1> %m, <2 x i64> %passthru) ret <2 x i64> %v @@ -940,14 +940,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %v = call <4 x i64> @llvm.masked.gather.v4i64.v4p0i64(<4 x i64*> %ptrs, i32 8, <4 x i1> %m, <4 x i64> %passthru) ret <4 x i64> %v @@ -958,7 +958,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8 -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_v4i64: @@ -993,14 +993,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %v = call <8 x i64> @llvm.masked.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, i32 8, <8 x i1> %m, <8 x i64> %passthru) ret <8 x i64> %v @@ -1014,7 +1014,7 @@ ; RV32-NEXT: vsll.vi v8, v10, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: 
mgather_baseidx_v8i8_v8i64: @@ -1023,7 +1023,7 @@ ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i8> %idxs %v = call <8 x i64> @llvm.masked.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, i32 8, <8 x i1> %m, <8 x i64> %passthru) @@ -1037,7 +1037,7 @@ ; RV32-NEXT: vsext.vf8 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i8_v8i64: @@ -1046,7 +1046,7 @@ ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -1061,7 +1061,7 @@ ; RV32-NEXT: vzext.vf8 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i8_v8i64: @@ -1070,7 +1070,7 @@ ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -1086,7 +1086,7 @@ ; RV32-NEXT: vsll.vi v8, v10, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i16_v8i64: @@ -1095,7 +1095,7 @@ ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i16> %idxs %v = call <8 x i64> @llvm.masked.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, i32 8, <8 x i1> %m, <8 x i64> %passthru) @@ -1109,7 +1109,7 @@ ; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i16_v8i64: @@ -1118,7 +1118,7 @@ ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -1133,7 +1133,7 @@ ; RV32-NEXT: vzext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i16_v8i64: @@ -1142,7 +1142,7 @@ ; RV64-NEXT: vzext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -1157,7 +1157,7 @@ ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; 
RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i32_v8i64: @@ -1166,7 +1166,7 @@ ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i32> %idxs %v = call <8 x i64> @llvm.masked.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, i32 8, <8 x i1> %m, <8 x i64> %passthru) @@ -1180,7 +1180,7 @@ ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i32_v8i64: @@ -1189,7 +1189,7 @@ ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = sext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -1204,7 +1204,7 @@ ; RV32-NEXT: vzext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i32_v8i64: @@ -1213,7 +1213,7 @@ ; RV64-NEXT: vzext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = zext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %eidxs @@ -1227,7 +1227,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i64: @@ -1235,7 +1235,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsll.vi v8, v8, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %idxs %v = call <8 x i64> @llvm.masked.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, i32 8, <8 x i1> %m, <8 x i64> %passthru) @@ -1343,14 +1343,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v8f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %v = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> %ptrs, i32 2, <8 x i1> %m, <8 x half> %passthru) ret <8 x half> %v @@ -1364,7 +1364,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v10, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i8_v8f16: @@ -1374,7 +1374,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v12, v0.t -; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: vmv.v.v v8, v9 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, <8 x i8> %idxs %v = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> %ptrs, i32 2, <8 x i1> %m, <8 x half> %passthru) @@ -1389,7 +1389,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: 
vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v10, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i8_v8f16: @@ -1399,7 +1399,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v12, v0.t -; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: vmv.v.v v8, v9 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %eidxs @@ -1415,7 +1415,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v10, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i8_v8f16: @@ -1425,7 +1425,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v12, v0.t -; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: vmv.v.v v8, v9 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i16> %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %eidxs @@ -1441,7 +1441,7 @@ ; RV32-NEXT: vadd.vv v10, v10, v10 ; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v10, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8f16: @@ -1451,7 +1451,7 @@ ; RV64-NEXT: vadd.vv v12, v12, v12 ; RV64-NEXT: vsetvli zero, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v12, v0.t -; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: vmv.v.v v8, v9 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, <8 x i16> %idxs %v = call <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*> %ptrs, i32 2, <8 x i1> %m, <8 x half> %passthru) @@ -1505,14 +1505,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v4f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %v = call <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*> %ptrs, i32 4, <4 x i1> %m, <4 x float> %passthru) ret <4 x float> %v @@ -1529,7 +1529,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8 -; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1558,14 +1558,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v8f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %v = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru) ret <8 x float> %v @@ -1578,7 +1578,7 @@ ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i8_v8f32: @@ -1588,7 +1588,7 
@@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, <8 x i8> %idxs %v = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru) @@ -1602,7 +1602,7 @@ ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i8_v8f32: @@ -1612,7 +1612,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ -1627,7 +1627,7 @@ ; RV32-NEXT: vzext.vf4 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i8_v8f32: @@ -1637,7 +1637,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ -1652,7 +1652,7 @@ ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i16_v8f32: @@ -1662,7 +1662,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, <8 x i16> %idxs %v = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru) @@ -1676,7 +1676,7 @@ ; RV32-NEXT: vsext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i16_v8f32: @@ -1686,7 +1686,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ -1701,7 +1701,7 @@ ; RV32-NEXT: vzext.vf2 v12, v8 ; RV32-NEXT: vsll.vi v8, v12, 2 ; RV32-NEXT: vluxei32.v v10, (a0), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i16_v8f32: @@ -1711,7 +1711,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i32> %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %eidxs @@ -1725,7 +1725,7 @@ ; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vluxei32.v 
v10, (a0), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8f32: @@ -1735,7 +1735,7 @@ ; RV64-NEXT: vsll.vi v12, v12, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v12, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, <8 x i32> %idxs %v = call <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*> %ptrs, i32 4, <8 x i1> %m, <8 x float> %passthru) @@ -1749,14 +1749,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v1f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: vmv.v.v v8, v9 ; RV64-NEXT: ret %v = call <1 x double> @llvm.masked.gather.v1f64.v1p0f64(<1 x double*> %ptrs, i32 8, <1 x i1> %m, <1 x double> %passthru) ret <1 x double> %v @@ -1769,14 +1769,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v2f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: vmv.v.v v8, v9 ; RV64-NEXT: ret %v = call <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*> %ptrs, i32 8, <2 x i1> %m, <2 x double> %passthru) ret <2 x double> %v @@ -1789,14 +1789,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v4f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %v = call <4 x double> @llvm.masked.gather.v4f64.v4p0f64(<4 x double*> %ptrs, i32 8, <4 x i1> %m, <4 x double> %passthru) ret <4 x double> %v @@ -1807,7 +1807,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8 -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_v4f64: @@ -1842,14 +1842,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_v8f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %v = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> %ptrs, i32 8, <8 x i1> %m, <8 x double> %passthru) ret <8 x double> %v @@ -1863,7 +1863,7 @@ ; RV32-NEXT: vsll.vi v8, v10, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i8_v8f64: @@ -1872,7 +1872,7 @@ ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %ptrs = 
getelementptr inbounds double, double* %base, <8 x i8> %idxs %v = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> %ptrs, i32 8, <8 x i1> %m, <8 x double> %passthru) @@ -1886,7 +1886,7 @@ ; RV32-NEXT: vsext.vf8 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i8_v8f64: @@ -1895,7 +1895,7 @@ ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = sext <8 x i8> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1910,7 +1910,7 @@ ; RV32-NEXT: vzext.vf8 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i8_v8f64: @@ -1919,7 +1919,7 @@ ; RV64-NEXT: vzext.vf8 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = zext <8 x i8> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1935,7 +1935,7 @@ ; RV32-NEXT: vsll.vi v8, v10, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i16_v8f64: @@ -1944,7 +1944,7 @@ ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i16> %idxs %v = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> %ptrs, i32 8, <8 x i1> %m, <8 x double> %passthru) @@ -1958,7 +1958,7 @@ ; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i16_v8f64: @@ -1967,7 +1967,7 @@ ; RV64-NEXT: vsext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = sext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -1982,7 +1982,7 @@ ; RV32-NEXT: vzext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i16_v8f64: @@ -1991,7 +1991,7 @@ ; RV64-NEXT: vzext.vf4 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = zext <8 x i16> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -2006,7 +2006,7 @@ ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8i32_v8f64: @@ -2015,7 +2015,7 @@ ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; 
RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i32> %idxs %v = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> %ptrs, i32 8, <8 x i1> %m, <8 x double> %passthru) @@ -2029,7 +2029,7 @@ ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_v8i32_v8f64: @@ -2038,7 +2038,7 @@ ; RV64-NEXT: vsext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = sext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -2053,7 +2053,7 @@ ; RV32-NEXT: vzext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_v8i32_v8f64: @@ -2062,7 +2062,7 @@ ; RV64-NEXT: vzext.vf2 v16, v8 ; RV64-NEXT: vsll.vi v8, v16, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = zext <8 x i32> %idxs to <8 x i64> %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %eidxs @@ -2076,7 +2076,7 @@ ; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v8f64: @@ -2084,7 +2084,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu ; RV64-NEXT: vsll.vi v8, v8, 3 ; RV64-NEXT: vluxei64.v v12, (a0), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, <8 x i64> %idxs %v = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> %ptrs, i32 8, <8 x i1> %m, <8 x double> %passthru) @@ -2100,7 +2100,7 @@ ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v12, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v16i8: @@ -2109,7 +2109,7 @@ ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v16, v0.t -; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: vmv.v.v v8, v9 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, <16 x i8> %idxs %v = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %ptrs, i32 2, <16 x i1> %m, <16 x i8> %passthru) @@ -2126,7 +2126,7 @@ ; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v16, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_v32i8: diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll @@ -382,14 +382,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v8i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, 
mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %v = call <8 x i16> @llvm.vp.gather.v8i16.v8p0i16(<8 x i16*> %ptrs, <8 x i1> %m, i32 %evl) ret <8 x i16> %v @@ -563,7 +563,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %v = call <4 x i32> @llvm.vp.gather.v4i32.v4p0i32(<4 x i32*> %ptrs, <4 x i1> %m, i32 %evl) ret <4 x i32> %v @@ -580,7 +580,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8 -; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -601,7 +601,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %v = call <8 x i32> @llvm.vp.gather.v8i32.v8p0i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 %evl) ret <8 x i32> %v @@ -778,7 +778,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2i64: @@ -797,7 +797,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v4i64: @@ -814,7 +814,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8 -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_v4i64: @@ -835,7 +835,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v8i64: @@ -1085,14 +1085,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v8f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %v = call <8 x half> @llvm.vp.gather.v8f16.v8p0f16(<8 x half*> %ptrs, <8 x i1> %m, i32 %evl) ret <8 x half> %v @@ -1224,7 +1224,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %v = call <4 x float> @llvm.vp.gather.v4f32.v4p0f32(<4 x float*> %ptrs, <4 x i1> %m, i32 %evl) ret <4 x float> %v @@ -1241,7 +1241,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8 -; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1262,7 +1262,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; 
RV64-NEXT: ret %v = call <8 x float> @llvm.vp.gather.v8f32.v8p0f32(<8 x float*> %ptrs, <8 x i1> %m, i32 %evl) ret <8 x float> %v @@ -1439,7 +1439,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v2f64: @@ -1458,7 +1458,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v4f64: @@ -1475,7 +1475,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8 -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_v4f64: @@ -1496,7 +1496,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_v8f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll @@ -135,7 +135,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vsext.vf8 v10, v9 -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i8_sextload_nxv2i64: @@ -157,7 +157,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vzext.vf8 v10, v9 -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i8_zextload_nxv2i64: @@ -233,14 +233,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv8i8.nxv8p0i8( %ptrs, i32 1, %m, %passthru) ret %v @@ -253,7 +253,7 @@ ; RV32-NEXT: vsext.vf4 v12, v8 ; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (a0), v12, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i8: @@ -262,7 +262,7 @@ ; RV64-NEXT: vsext.vf8 v16, v8 ; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (a0), v16, v0.t -; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: vmv.v.v v8, v9 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i8, i8* %base, %idxs %v = call @llvm.masked.gather.nxv8i8.nxv8p0i8( %ptrs, i32 1, %m, %passthru) @@ -358,7 +358,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vsext.vf4 v10, v9 -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i16_sextload_nxv2i64: @@ -380,7 +380,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vzext.vf4 v10, v9 -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i16_zextload_nxv2i64: @@ -402,14 +402,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: 
vsetvli a0, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4i16.nxv4p0i16( %ptrs, i32 2, %m, %passthru) ret %v @@ -420,14 +420,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8 -; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8 -; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -456,14 +456,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t -; RV64-NEXT: vmv2r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv8i16.nxv8p0i16( %ptrs, i32 2, %m, %passthru) ret %v @@ -477,7 +477,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8i16: @@ -487,7 +487,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, %idxs %v = call @llvm.masked.gather.nxv8i16.nxv8p0i16( %ptrs, i32 2, %m, %passthru) @@ -502,7 +502,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i16: @@ -512,7 +512,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i16, i16* %base, %eidxs @@ -528,7 +528,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i16: @@ -538,7 +538,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i16, i16* %base, %eidxs @@ -554,7 +554,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: 
mgather_baseidx_nxv8i16: @@ -564,7 +564,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i16, i16* %base, %idxs %v = call @llvm.masked.gather.nxv8i16.nxv8p0i16( %ptrs, i32 2, %m, %passthru) @@ -598,14 +598,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i32.nxv2p0i32( %ptrs, i32 4, %m, %passthru) ret %v @@ -618,7 +618,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vsext.vf2 v10, v9 -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i32_sextload_nxv2i64: @@ -640,7 +640,7 @@ ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu ; RV32-NEXT: vzext.vf2 v10, v9 -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i32_zextload_nxv2i64: @@ -662,14 +662,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4i32.nxv4p0i32( %ptrs, i32 4, %m, %passthru) ret %v @@ -686,7 +686,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8 -; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -715,14 +715,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8i32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv8i32.nxv8p0i32( %ptrs, i32 4, %m, %passthru) ret %v @@ -735,7 +735,7 @@ ; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8i32: @@ -745,7 +745,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs %v = call @llvm.masked.gather.nxv8i32.nxv8p0i32( %ptrs, i32 4, %m, %passthru) @@ -759,7 +759,7 @@ ; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, 
v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i32: @@ -769,7 +769,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i32, i32* %base, %eidxs @@ -784,7 +784,7 @@ ; RV32-NEXT: vzext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i32: @@ -794,7 +794,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i32, i32* %base, %eidxs @@ -809,7 +809,7 @@ ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i16_nxv8i32: @@ -819,7 +819,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs %v = call @llvm.masked.gather.nxv8i32.nxv8p0i32( %ptrs, i32 4, %m, %passthru) @@ -833,7 +833,7 @@ ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i32: @@ -843,7 +843,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i32, i32* %base, %eidxs @@ -858,7 +858,7 @@ ; RV32-NEXT: vzext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i16_nxv8i32: @@ -868,7 +868,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i32, i32* %base, %eidxs @@ -882,7 +882,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i32: @@ -892,7 +892,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i32, i32* %base, %idxs %v = call @llvm.masked.gather.nxv8i32.nxv8p0i32( %ptrs, i32 4, %m, %passthru) @@ -906,14 +906,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv1i64: ; RV64: # %bb.0: ; RV64-NEXT: 
vsetvli a0, zero, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: vmv.v.v v8, v9 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv1i64.nxv1p0i64( %ptrs, i32 8, %m, %passthru) ret %v @@ -926,14 +926,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2i64.nxv2p0i64( %ptrs, i32 8, %m, %passthru) ret %v @@ -946,14 +946,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4i64.nxv4p0i64( %ptrs, i32 8, %m, %passthru) ret %v @@ -964,7 +964,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8 -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4i64: @@ -999,14 +999,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv8i64.nxv8p0i64( %ptrs, i32 8, %m, %passthru) ret %v @@ -1020,7 +1020,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8i64: @@ -1029,7 +1029,7 @@ ; RV64-NEXT: vsext.vf8 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs %v = call @llvm.masked.gather.nxv8i64.nxv8p0i64( %ptrs, i32 8, %m, %passthru) @@ -1043,7 +1043,7 @@ ; RV32-NEXT: vsext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8i64: @@ -1052,7 +1052,7 @@ ; RV64-NEXT: vsext.vf8 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -1067,7 +1067,7 @@ ; RV32-NEXT: vzext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i64: @@ -1076,7 +1076,7 @@ ; RV64-NEXT: vzext.vf8 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, 
(a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -1092,7 +1092,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i16_nxv8i64: @@ -1101,7 +1101,7 @@ ; RV64-NEXT: vsext.vf4 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs %v = call @llvm.masked.gather.nxv8i64.nxv8p0i64( %ptrs, i32 8, %m, %passthru) @@ -1115,7 +1115,7 @@ ; RV32-NEXT: vsext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i64: @@ -1124,7 +1124,7 @@ ; RV64-NEXT: vsext.vf4 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -1139,7 +1139,7 @@ ; RV32-NEXT: vzext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i16_nxv8i64: @@ -1148,7 +1148,7 @@ ; RV64-NEXT: vzext.vf4 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -1163,7 +1163,7 @@ ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i32_nxv8i64: @@ -1172,7 +1172,7 @@ ; RV64-NEXT: vsext.vf2 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs %v = call @llvm.masked.gather.nxv8i64.nxv8p0i64( %ptrs, i32 8, %m, %passthru) @@ -1186,7 +1186,7 @@ ; RV32-NEXT: vsext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i32_nxv8i64: @@ -1195,7 +1195,7 @@ ; RV64-NEXT: vsext.vf2 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds i64, i64* %base, %eidxs @@ -1210,7 +1210,7 @@ ; RV32-NEXT: vzext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i32_nxv8i64: @@ -1219,7 +1219,7 @@ ; RV64-NEXT: vzext.vf2 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds i64, i64* 
%base, %eidxs @@ -1233,7 +1233,7 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i64: @@ -1241,7 +1241,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsll.vi v8, v8, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %ptrs = getelementptr inbounds i64, i64* %base, %idxs %v = call @llvm.masked.gather.nxv8i64.nxv8p0i64( %ptrs, i32 8, %m, %passthru) @@ -1361,14 +1361,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4f16.nxv4p0f16( %ptrs, i32 2, %m, %passthru) ret %v @@ -1379,14 +1379,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8 -; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8 -; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -1415,14 +1415,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t -; RV64-NEXT: vmv2r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv8f16.nxv8p0f16( %ptrs, i32 2, %m, %passthru) ret %v @@ -1436,7 +1436,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8f16: @@ -1446,7 +1446,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, %idxs %v = call @llvm.masked.gather.nxv8f16.nxv8p0f16( %ptrs, i32 2, %m, %passthru) @@ -1461,7 +1461,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f16: @@ -1471,7 +1471,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds half, half* %base, %eidxs @@ -1487,7 +1487,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), 
v12, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f16: @@ -1497,7 +1497,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds half, half* %base, %eidxs @@ -1513,7 +1513,7 @@ ; RV32-NEXT: vadd.vv v12, v12, v12 ; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v12, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8f16: @@ -1523,7 +1523,7 @@ ; RV64-NEXT: vadd.vv v16, v16, v16 ; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %ptrs = getelementptr inbounds half, half* %base, %idxs %v = call @llvm.masked.gather.nxv8f16.nxv8p0f16( %ptrs, i32 2, %m, %passthru) @@ -1557,14 +1557,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2f32.nxv2p0f32( %ptrs, i32 4, %m, %passthru) ret %v @@ -1577,14 +1577,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4f32.nxv4p0f32( %ptrs, i32 4, %m, %passthru) ret %v @@ -1601,7 +1601,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8 -; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -1630,14 +1630,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv8f32.nxv8p0f32( %ptrs, i32 4, %m, %passthru) ret %v @@ -1650,7 +1650,7 @@ ; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8f32: @@ -1660,7 +1660,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs %v = call @llvm.masked.gather.nxv8f32.nxv8p0f32( %ptrs, i32 4, %m, %passthru) @@ -1674,7 
+1674,7 @@ ; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f32: @@ -1684,7 +1684,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1699,7 +1699,7 @@ ; RV32-NEXT: vzext.vf4 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f32: @@ -1709,7 +1709,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1724,7 +1724,7 @@ ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i16_nxv8f32: @@ -1734,7 +1734,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs %v = call @llvm.masked.gather.nxv8f32.nxv8p0f32( %ptrs, i32 4, %m, %passthru) @@ -1748,7 +1748,7 @@ ; RV32-NEXT: vsext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f32: @@ -1758,7 +1758,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1773,7 +1773,7 @@ ; RV32-NEXT: vzext.vf2 v16, v8 ; RV32-NEXT: vsll.vi v8, v16, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i16_nxv8f32: @@ -1783,7 +1783,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds float, float* %base, %eidxs @@ -1797,7 +1797,7 @@ ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8f32: @@ -1807,7 +1807,7 @@ ; RV64-NEXT: vsll.vi v16, v16, 2 ; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %ptrs = getelementptr inbounds float, float* %base, %idxs %v = call @llvm.masked.gather.nxv8f32.nxv8p0f32( %ptrs, i32 4, %m, %passthru) @@ -1821,14 +1821,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: 
vsetvli a0, zero, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv1f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v9 +; RV64-NEXT: vmv.v.v v8, v9 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv1f64.nxv1p0f64( %ptrs, i32 8, %m, %passthru) ret %v @@ -1841,14 +1841,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vmv2r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv2f64.nxv2p0f64( %ptrs, i32 8, %m, %passthru) ret %v @@ -1861,14 +1861,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv4f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv4f64.nxv4p0f64( %ptrs, i32 8, %m, %passthru) ret %v @@ -1879,7 +1879,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8 -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4f64: @@ -1914,14 +1914,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv8f64: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %v = call @llvm.masked.gather.nxv8f64.nxv8p0f64( %ptrs, i32 8, %m, %passthru) ret %v @@ -1935,7 +1935,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i8_nxv8f64: @@ -1944,7 +1944,7 @@ ; RV64-NEXT: vsext.vf8 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs %v = call @llvm.masked.gather.nxv8f64.nxv8p0f64( %ptrs, i32 8, %m, %passthru) @@ -1958,7 +1958,7 @@ ; RV32-NEXT: vsext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i8_nxv8f64: @@ -1967,7 +1967,7 @@ ; RV64-NEXT: vsext.vf8 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -1982,7 +1982,7 @@ ; RV32-NEXT: vzext.vf8 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; 
RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f64: @@ -1991,7 +1991,7 @@ ; RV64-NEXT: vzext.vf8 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -2007,7 +2007,7 @@ ; RV32-NEXT: vsll.vi v8, v12, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i16_nxv8f64: @@ -2016,7 +2016,7 @@ ; RV64-NEXT: vsext.vf4 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs %v = call @llvm.masked.gather.nxv8f64.nxv8p0f64( %ptrs, i32 8, %m, %passthru) @@ -2030,7 +2030,7 @@ ; RV32-NEXT: vsext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f64: @@ -2039,7 +2039,7 @@ ; RV64-NEXT: vsext.vf4 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -2054,7 +2054,7 @@ ; RV32-NEXT: vzext.vf4 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_zext_nxv8i16_nxv8f64: @@ -2063,7 +2063,7 @@ ; RV64-NEXT: vzext.vf4 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -2078,7 +2078,7 @@ ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8i32_nxv8f64: @@ -2087,7 +2087,7 @@ ; RV64-NEXT: vsext.vf2 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs %v = call @llvm.masked.gather.nxv8f64.nxv8p0f64( %ptrs, i32 8, %m, %passthru) @@ -2101,7 +2101,7 @@ ; RV32-NEXT: vsext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_sext_nxv8i32_nxv8f64: @@ -2110,7 +2110,7 @@ ; RV64-NEXT: vsext.vf2 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %eidxs = sext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -2125,7 +2125,7 @@ ; RV32-NEXT: vzext.vf2 v24, v8 ; RV32-NEXT: vsll.vi v8, v24, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: 
mgather_baseidx_zext_nxv8i32_nxv8f64: @@ -2134,7 +2134,7 @@ ; RV64-NEXT: vzext.vf2 v24, v8 ; RV64-NEXT: vsll.vi v8, v24, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %eidxs = zext %idxs to %ptrs = getelementptr inbounds double, double* %base, %eidxs @@ -2148,7 +2148,7 @@ ; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV32-NEXT: vsll.vi v8, v8, 3 ; RV32-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv8f64: @@ -2156,7 +2156,7 @@ ; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu ; RV64-NEXT: vsll.vi v8, v8, 3 ; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t -; RV64-NEXT: vmv8r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %ptrs = getelementptr inbounds double, double* %base, %idxs %v = call @llvm.masked.gather.nxv8f64.nxv8p0f64( %ptrs, i32 8, %m, %passthru) @@ -2172,7 +2172,7 @@ ; RV32-NEXT: vsext.vf4 v16, v8 ; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (a0), v16, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_baseidx_nxv16i8: diff --git a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll --- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll @@ -290,7 +290,7 @@ ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 -; RV32-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 +; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v9 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv8i8: @@ -301,7 +301,7 @@ ; RV32-BITS-256-NEXT: vid.v v9 ; RV32-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-256-NEXT: vrgather.vv v9, v8, v10 -; RV32-BITS-256-NEXT: vmv1r.v v8, v9 +; RV32-BITS-256-NEXT: vmv.v.v v8, v9 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv8i8: @@ -312,7 +312,7 @@ ; RV32-BITS-512-NEXT: vid.v v9 ; RV32-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV32-BITS-512-NEXT: vrgather.vv v9, v8, v10 -; RV32-BITS-512-NEXT: vmv1r.v v8, v9 +; RV32-BITS-512-NEXT: vmv.v.v v8, v9 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv8i8: @@ -324,7 +324,7 @@ ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v10, v10, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m1, ta, mu ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v9, v8, v10 -; RV64-BITS-UNKNOWN-NEXT: vmv1r.v v8, v9 +; RV64-BITS-UNKNOWN-NEXT: vmv.v.v v8, v9 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv8i8: @@ -335,7 +335,7 @@ ; RV64-BITS-256-NEXT: vid.v v9 ; RV64-BITS-256-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-256-NEXT: vrgather.vv v9, v8, v10 -; RV64-BITS-256-NEXT: vmv1r.v v8, v9 +; RV64-BITS-256-NEXT: vmv.v.v v8, v9 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv8i8: @@ -346,7 +346,7 @@ ; RV64-BITS-512-NEXT: vid.v v9 ; RV64-BITS-512-NEXT: vrsub.vx v10, v9, a0 ; RV64-BITS-512-NEXT: vrgather.vv v9, v8, v10 -; RV64-BITS-512-NEXT: vmv1r.v v8, v9 +; RV64-BITS-512-NEXT: vmv.v.v v8, v9 ; RV64-BITS-512-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8i8( %a) ret %res @@ -363,7 +363,7 @@ ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v12, v12, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v12 -; RV32-BITS-UNKNOWN-NEXT: vmv2r.v v8, v10 +; 
RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v10 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv16i8: @@ -375,7 +375,7 @@ ; RV32-BITS-256-NEXT: vid.v v10 ; RV32-BITS-256-NEXT: vrsub.vx v12, v10, a0 ; RV32-BITS-256-NEXT: vrgather.vv v10, v8, v12 -; RV32-BITS-256-NEXT: vmv2r.v v8, v10 +; RV32-BITS-256-NEXT: vmv.v.v v8, v10 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv16i8: @@ -387,7 +387,7 @@ ; RV32-BITS-512-NEXT: vid.v v10 ; RV32-BITS-512-NEXT: vrsub.vx v12, v10, a0 ; RV32-BITS-512-NEXT: vrgather.vv v10, v8, v12 -; RV32-BITS-512-NEXT: vmv2r.v v8, v10 +; RV32-BITS-512-NEXT: vmv.v.v v8, v10 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv16i8: @@ -400,7 +400,7 @@ ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v12, v12, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m2, ta, mu ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v10, v8, v12 -; RV64-BITS-UNKNOWN-NEXT: vmv2r.v v8, v10 +; RV64-BITS-UNKNOWN-NEXT: vmv.v.v v8, v10 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv16i8: @@ -412,7 +412,7 @@ ; RV64-BITS-256-NEXT: vid.v v10 ; RV64-BITS-256-NEXT: vrsub.vx v12, v10, a0 ; RV64-BITS-256-NEXT: vrgather.vv v10, v8, v12 -; RV64-BITS-256-NEXT: vmv2r.v v8, v10 +; RV64-BITS-256-NEXT: vmv.v.v v8, v10 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv16i8: @@ -424,7 +424,7 @@ ; RV64-BITS-512-NEXT: vid.v v10 ; RV64-BITS-512-NEXT: vrsub.vx v12, v10, a0 ; RV64-BITS-512-NEXT: vrgather.vv v10, v8, v12 -; RV64-BITS-512-NEXT: vmv2r.v v8, v10 +; RV64-BITS-512-NEXT: vmv.v.v v8, v10 ; RV64-BITS-512-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv16i8( %a) ret %res @@ -441,7 +441,7 @@ ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, mu ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v8, v16 -; RV32-BITS-UNKNOWN-NEXT: vmv4r.v v8, v12 +; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v12 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv32i8: @@ -453,7 +453,7 @@ ; RV32-BITS-256-NEXT: vid.v v12 ; RV32-BITS-256-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-256-NEXT: vrgather.vv v12, v8, v16 -; RV32-BITS-256-NEXT: vmv4r.v v8, v12 +; RV32-BITS-256-NEXT: vmv.v.v v8, v12 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv32i8: @@ -465,7 +465,7 @@ ; RV32-BITS-512-NEXT: vid.v v12 ; RV32-BITS-512-NEXT: vrsub.vx v16, v12, a0 ; RV32-BITS-512-NEXT: vrgather.vv v12, v8, v16 -; RV32-BITS-512-NEXT: vmv4r.v v8, v12 +; RV32-BITS-512-NEXT: vmv.v.v v8, v12 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv32i8: @@ -478,7 +478,7 @@ ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8, m4, ta, mu ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v12, v8, v16 -; RV64-BITS-UNKNOWN-NEXT: vmv4r.v v8, v12 +; RV64-BITS-UNKNOWN-NEXT: vmv.v.v v8, v12 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv32i8: @@ -490,7 +490,7 @@ ; RV64-BITS-256-NEXT: vid.v v12 ; RV64-BITS-256-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-256-NEXT: vrgather.vv v12, v8, v16 -; RV64-BITS-256-NEXT: vmv4r.v v8, v12 +; RV64-BITS-256-NEXT: vmv.v.v v8, v12 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv32i8: @@ -502,7 +502,7 @@ ; RV64-BITS-512-NEXT: vid.v v12 ; RV64-BITS-512-NEXT: vrsub.vx v16, v12, a0 ; RV64-BITS-512-NEXT: vrgather.vv v12, v8, v16 -; RV64-BITS-512-NEXT: vmv4r.v v8, v12 +; RV64-BITS-512-NEXT: vmv.v.v v8, v12 ; RV64-BITS-512-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv32i8( %a) ret %res @@ -532,7 +532,7 @@ ; RV32-BITS-256-NEXT: 
vid.v v16 ; RV32-BITS-256-NEXT: vrsub.vx v24, v16, a0 ; RV32-BITS-256-NEXT: vrgather.vv v16, v8, v24 -; RV32-BITS-256-NEXT: vmv8r.v v8, v16 +; RV32-BITS-256-NEXT: vmv.v.v v8, v16 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv64i8: @@ -571,7 +571,7 @@ ; RV64-BITS-256-NEXT: vid.v v16 ; RV64-BITS-256-NEXT: vrsub.vx v24, v16, a0 ; RV64-BITS-256-NEXT: vrgather.vv v16, v8, v24 -; RV64-BITS-256-NEXT: vmv8r.v v8, v16 +; RV64-BITS-256-NEXT: vmv.v.v v8, v16 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv64i8: @@ -632,7 +632,7 @@ ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4i16( %a) ret %res @@ -647,7 +647,7 @@ ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vrsub.vx v12, v10, a0 ; CHECK-NEXT: vrgather.vv v10, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8i16( %a) ret %res @@ -663,7 +663,7 @@ ; CHECK-NEXT: vid.v v12 ; CHECK-NEXT: vrsub.vx v16, v12, a0 ; CHECK-NEXT: vrgather.vv v12, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv16i16( %a) ret %res @@ -679,7 +679,7 @@ ; CHECK-NEXT: vid.v v16 ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv32i16( %a) ret %res @@ -711,7 +711,7 @@ ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv2i32( %a) ret %res @@ -727,7 +727,7 @@ ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vrsub.vx v12, v10, a0 ; CHECK-NEXT: vrgather.vv v10, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4i32( %a) ret %res @@ -742,7 +742,7 @@ ; CHECK-NEXT: vid.v v12 ; CHECK-NEXT: vrsub.vx v16, v12, a0 ; CHECK-NEXT: vrgather.vv v12, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8i32( %a) ret %res @@ -758,7 +758,7 @@ ; CHECK-NEXT: vid.v v16 ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv16i32( %a) ret %res @@ -774,7 +774,7 @@ ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv1i64( %a) ret %res @@ -790,7 +790,7 @@ ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vrsub.vx v12, v10, a0 ; CHECK-NEXT: vrgather.vv v10, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv2i64( %a) ret %res @@ -806,7 +806,7 @@ ; CHECK-NEXT: vid.v v12 ; CHECK-NEXT: vrsub.vx v16, v12, a0 ; CHECK-NEXT: vrgather.vv v12, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4i64( %a) ret %res @@ -821,7 +821,7 @@ ; CHECK-NEXT: vid.v v16 ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: 
vrgather.vv v16, v8, v24 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8i64( %a) ret %res @@ -873,7 +873,7 @@ ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4f16( %a) ret %res @@ -888,7 +888,7 @@ ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vrsub.vx v12, v10, a0 ; CHECK-NEXT: vrgather.vv v10, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8f16( %a) ret %res @@ -904,7 +904,7 @@ ; CHECK-NEXT: vid.v v12 ; CHECK-NEXT: vrsub.vx v16, v12, a0 ; CHECK-NEXT: vrgather.vv v12, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv16f16( %a) ret %res @@ -920,7 +920,7 @@ ; CHECK-NEXT: vid.v v16 ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv32f16( %a) ret %res @@ -952,7 +952,7 @@ ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv2f32( %a) ret %res @@ -968,7 +968,7 @@ ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vrsub.vx v12, v10, a0 ; CHECK-NEXT: vrgather.vv v10, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4f32( %a) ret %res @@ -983,7 +983,7 @@ ; CHECK-NEXT: vid.v v12 ; CHECK-NEXT: vrsub.vx v16, v12, a0 ; CHECK-NEXT: vrgather.vv v12, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8f32( %a) ret %res @@ -999,7 +999,7 @@ ; CHECK-NEXT: vid.v v16 ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv16f32( %a) ret %res @@ -1015,7 +1015,7 @@ ; CHECK-NEXT: vid.v v9 ; CHECK-NEXT: vrsub.vx v10, v9, a0 ; CHECK-NEXT: vrgather.vv v9, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv1f64( %a) ret %res @@ -1031,7 +1031,7 @@ ; CHECK-NEXT: vid.v v10 ; CHECK-NEXT: vrsub.vx v12, v10, a0 ; CHECK-NEXT: vrgather.vv v10, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv2f64( %a) ret %res @@ -1047,7 +1047,7 @@ ; CHECK-NEXT: vid.v v12 ; CHECK-NEXT: vrsub.vx v16, v12, a0 ; CHECK-NEXT: vrgather.vv v12, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4f64( %a) ret %res @@ -1062,7 +1062,7 @@ ; CHECK-NEXT: vid.v v16 ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8f64( %a) ret %res diff --git a/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode.ll @@ -51,7 +51,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf8 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -62,7 +62,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf8 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -95,7 +95,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vsext.vf4 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -106,7 +106,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vzext.vf4 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -117,7 +117,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf8 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -128,7 +128,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vzext.vf8 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -139,7 +139,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -150,7 +150,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -161,7 +161,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vsext.vf4 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -172,7 +172,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vzext.vf4 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -183,7 +183,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf8 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -194,7 +194,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vzext.vf8 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -205,7 +205,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vsext.vf2 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -216,7 +216,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vzext.vf2 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -227,7 +227,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsext.vf4 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -238,7 +238,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vzext.vf4 
v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -249,7 +249,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf8 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -260,7 +260,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vzext.vf8 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -271,7 +271,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vsext.vf2 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -282,7 +282,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vzext.vf2 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -293,7 +293,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vsext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -304,7 +304,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vzext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -315,7 +315,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -326,7 +326,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu ; CHECK-NEXT: vzext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -359,7 +359,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf4 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -370,7 +370,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf4 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -381,7 +381,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -392,7 +392,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -403,7 +403,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf4 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -414,7 +414,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vzext.vf4 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -425,7 +425,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vsext.vf2 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -436,7 +436,7 @@ ; CHECK: 
# %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vzext.vf2 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -447,7 +447,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf4 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -458,7 +458,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vzext.vf4 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -469,7 +469,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vsext.vf2 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -480,7 +480,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vzext.vf2 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -491,7 +491,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -502,7 +502,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vzext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -513,7 +513,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -524,7 +524,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu ; CHECK-NEXT: vzext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -535,7 +535,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -546,7 +546,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf2 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -557,7 +557,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf2 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -568,7 +568,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu ; CHECK-NEXT: vzext.vf2 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -579,7 +579,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf2 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -590,7 +590,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu ; CHECK-NEXT: vzext.vf2 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -601,7 +601,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: 
vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -612,7 +612,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu ; CHECK-NEXT: vzext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = zext %va to ret %evec diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll @@ -94,7 +94,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( @@ -262,7 +262,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64( @@ -304,7 +304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64( @@ -346,7 +346,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll @@ -94,7 +94,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( @@ -262,7 +262,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64( @@ -304,7 +304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v12, v8 -; 
CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64( @@ -346,7 +346,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll @@ -94,7 +94,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32( @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32( @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( @@ -262,7 +262,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64( @@ -304,7 +304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64( @@ -346,7 +346,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll @@ -94,7 +94,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32( @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32( @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( @@ -262,7 +262,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64( @@ -304,7 +304,7 @@ ; CHECK: 
# %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64( @@ -346,7 +346,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll @@ -94,7 +94,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( @@ -262,7 +262,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64( @@ -304,7 +304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64( @@ -346,7 +346,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll @@ -94,7 +94,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( @@ -262,7 +262,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; 
CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64( @@ -304,7 +304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64( @@ -346,7 +346,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll @@ -94,7 +94,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32( @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32( @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( @@ -262,7 +262,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( @@ -304,7 +304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64( @@ -346,7 +346,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll @@ -94,7 +94,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32( @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32( @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( @@ -262,7 +262,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( @@ -304,7 +304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64( @@ -346,7 +346,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( @@ -220,7 +220,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( @@ -346,7 +346,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32( @@ -388,7 +388,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32( @@ -430,7 +430,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32( @@ -514,7 +514,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64( @@ -556,7 +556,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64( @@ -598,7 +598,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = 
call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( @@ -220,7 +220,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( @@ -346,7 +346,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32( @@ -388,7 +388,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32( @@ -430,7 +430,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32( @@ -514,7 +514,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64( @@ -556,7 +556,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64( @@ -598,7 +598,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( @@ -220,7 +220,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( @@ -346,7 +346,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( @@ -388,7 +388,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( @@ -430,7 +430,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( @@ -514,7 +514,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( @@ -556,7 +556,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( @@ -598,7 +598,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( @@ -220,7 +220,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( @@ -346,7 +346,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( @@ -388,7 +388,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( @@ -430,7 +430,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( @@ -514,7 +514,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( @@ -556,7 +556,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( @@ -598,7 +598,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( @@ -220,7 +220,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( @@ -346,7 +346,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( @@ -388,7 +388,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( @@ -430,7 +430,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( @@ -514,7 +514,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( @@ -556,7 +556,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( @@ -598,7 +598,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( @@ -220,7 +220,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( @@ -346,7 +346,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( @@ -388,7 +388,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( @@ -430,7 +430,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( @@ -514,7 +514,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( @@ -556,7 +556,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( @@ -598,7 +598,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.x.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: 
vfncvt.xu.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( @@ -220,7 +220,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( @@ -346,7 +346,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( @@ -388,7 +388,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( @@ -430,7 +430,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( @@ -514,7 +514,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( @@ -556,7 +556,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64( @@ -598,7 +598,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( @@ -220,7 +220,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( @@ -346,7 +346,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( @@ -388,7 +388,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, 
v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( @@ -430,7 +430,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( @@ -514,7 +514,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( @@ -556,7 +556,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64( @@ -598,7 +598,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.xu.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll @@ -369,7 +369,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -380,7 +380,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -481,7 +481,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -492,7 +492,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -569,7 +569,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -580,7 +580,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -883,7 +883,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -894,7 +894,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -995,7 +995,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1006,7 +1006,7 @@ ; CHECK: # %bb.0: ; 
CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1107,7 +1107,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1118,7 +1118,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1343,7 +1343,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1354,7 +1354,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1461,7 +1461,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1472,7 +1472,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1579,7 +1579,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1590,7 +1590,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll @@ -48,14 +48,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV32-NEXT: vfncvt.f.f.w v10, v8 -; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv4f32_nxv4f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; RV64-NEXT: vfncvt.f.f.w v10, v8 -; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -67,14 +67,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV32-NEXT: vfncvt.f.f.w v12, v8 -; RV32-NEXT: vmv2r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv8f32_nxv8f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; RV64-NEXT: vfncvt.f.f.w v12, v8 -; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -86,14 +86,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; RV32-NEXT: vfncvt.f.f.w v16, v8 -; RV32-NEXT: vmv4r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv16f32_nxv16f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; RV64-NEXT: vfncvt.f.f.w v16, 
v8 -; RV64-NEXT: vmv4r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -166,14 +166,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV32-NEXT: vfncvt.f.f.w v10, v8 -; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv2f64_nxv2f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; RV64-NEXT: vfncvt.f.f.w v10, v8 -; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -206,14 +206,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV32-NEXT: vfncvt.f.f.w v12, v8 -; RV32-NEXT: vmv2r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv4f64_nxv4f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; RV64-NEXT: vfncvt.f.f.w v12, v8 -; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -246,14 +246,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV32-NEXT: vfncvt.f.f.w v16, v8 -; RV32-NEXT: vmv4r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv8f64_nxv8f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; RV64-NEXT: vfncvt.f.f.w v16, v8 -; RV64-NEXT: vmv4r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll @@ -108,7 +108,7 @@ ; CHECK-NEXT: fmv.h.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfslide1up.vf v9, v8, ft0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv4f16.f16( @@ -156,7 +156,7 @@ ; CHECK-NEXT: fmv.h.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfslide1up.vf v10, v8, ft0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv8f16.f16( @@ -204,7 +204,7 @@ ; CHECK-NEXT: fmv.h.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfslide1up.vf v12, v8, ft0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv16f16.f16( @@ -252,7 +252,7 @@ ; CHECK-NEXT: fmv.h.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfslide1up.vf v16, v8, ft0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv32f16.f16( @@ -348,7 +348,7 @@ ; CHECK-NEXT: fmv.w.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfslide1up.vf v9, v8, ft0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv2f32.f32( @@ -396,7 +396,7 @@ ; CHECK-NEXT: fmv.w.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfslide1up.vf v10, v8, ft0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv4f32.f32( @@ -444,7 +444,7 @@ ; CHECK-NEXT: fmv.w.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfslide1up.vf v12, v8, ft0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfslide1up.nxv8f32.f32( @@ -492,7 +492,7 @@ ; CHECK-NEXT: fmv.w.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vfslide1up.vf v16, v8, ft0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv16f32.f32( @@ -543,7 +543,7 @@ ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vfslide1up.vf v9, v8, ft0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: @@ -599,7 +599,7 @@ ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu ; CHECK-NEXT: vfslide1up.vf v10, v8, ft0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: @@ -655,7 +655,7 @@ ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu ; CHECK-NEXT: vfslide1up.vf v12, v8, ft0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: @@ -711,7 +711,7 @@ ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu ; CHECK-NEXT: vfslide1up.vf v16, v8, ft0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll @@ -108,7 +108,7 @@ ; CHECK-NEXT: fmv.h.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vfslide1up.vf v9, v8, ft0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv4f16.f16( @@ -156,7 +156,7 @@ ; CHECK-NEXT: fmv.h.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vfslide1up.vf v10, v8, ft0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv8f16.f16( @@ -204,7 +204,7 @@ ; CHECK-NEXT: fmv.h.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vfslide1up.vf v12, v8, ft0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv16f16.f16( @@ -252,7 +252,7 @@ ; CHECK-NEXT: fmv.h.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vfslide1up.vf v16, v8, ft0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv32f16.f16( @@ -348,7 +348,7 @@ ; CHECK-NEXT: fmv.w.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vfslide1up.vf v9, v8, ft0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv2f32.f32( @@ -396,7 +396,7 @@ ; CHECK-NEXT: fmv.w.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vfslide1up.vf v10, v8, ft0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv4f32.f32( @@ -444,7 +444,7 @@ ; CHECK-NEXT: fmv.w.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vfslide1up.vf v12, v8, ft0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv8f32.f32( @@ -492,7 +492,7 @@ ; CHECK-NEXT: fmv.w.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: 
vfslide1up.vf v16, v8, ft0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv16f32.f32( @@ -540,7 +540,7 @@ ; CHECK-NEXT: fmv.d.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vfslide1up.vf v9, v8, ft0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv1f64.f64( @@ -588,7 +588,7 @@ ; CHECK-NEXT: fmv.d.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vfslide1up.vf v10, v8, ft0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv2f64.f64( @@ -636,7 +636,7 @@ ; CHECK-NEXT: fmv.d.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vfslide1up.vf v12, v8, ft0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv4f64.f64( @@ -684,7 +684,7 @@ ; CHECK-NEXT: fmv.d.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vfslide1up.vf v16, v8, ft0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll @@ -1145,7 +1145,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1156,7 +1156,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1209,7 +1209,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1220,7 +1220,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1273,7 +1273,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1284,7 +1284,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1405,7 +1405,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1416,7 +1416,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v10, v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1471,7 +1471,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = sitofp 
%va to ret %evec @@ -1482,7 +1482,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v12, v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1537,7 +1537,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1548,7 +1548,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vfncvt.f.xu.w v16, v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll @@ -149,7 +149,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i64( @@ -287,7 +287,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i64( @@ -333,7 +333,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i64( @@ -425,7 +425,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i64( @@ -471,7 +471,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i64( @@ -517,7 +517,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i64( @@ -835,7 +835,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i64( @@ -881,7 +881,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i64( @@ -973,7 +973,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i64( @@ -1019,7 +1019,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = 
call @llvm.riscv.vloxei.nxv4f32.nxv4i64( @@ -1065,7 +1065,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i64( @@ -1429,7 +1429,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i32( @@ -1475,7 +1475,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i32( @@ -1613,7 +1613,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i32( @@ -1659,7 +1659,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i32( @@ -1705,7 +1705,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i32( @@ -1976,7 +1976,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i32( @@ -2022,7 +2022,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i32( @@ -2068,7 +2068,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i32( @@ -2114,7 +2114,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i32( @@ -2252,7 +2252,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i32( @@ -2298,7 +2298,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i32( @@ -2344,7 +2344,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i32( @@ -2615,7 
+2615,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i32( @@ -2661,7 +2661,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i32( @@ -2707,7 +2707,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i32( @@ -2753,7 +2753,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i32( @@ -2937,7 +2937,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i16( @@ -2983,7 +2983,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i16( @@ -3029,7 +3029,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vloxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32i8.nxv32i16( @@ -3391,7 +3391,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i16( @@ -3437,7 +3437,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i16( @@ -3483,7 +3483,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i16( @@ -3529,7 +3529,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vloxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i16( @@ -3575,7 +3575,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i16( @@ -3621,7 +3621,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i16( @@ -3667,7 +3667,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a1, e64, m4, ta, mu ; CHECK-NEXT: vloxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i16( @@ -3713,7 +3713,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i16( @@ -4075,7 +4075,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i16( @@ -4121,7 +4121,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i16( @@ -4167,7 +4167,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i16( @@ -4213,7 +4213,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vloxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i16( @@ -4259,7 +4259,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i16( @@ -4305,7 +4305,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i16( @@ -4351,7 +4351,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i16( @@ -4397,7 +4397,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i16( @@ -4850,7 +4850,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i8( @@ -4896,7 +4896,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i8( @@ -4942,7 +4942,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i8( @@ -4988,7 +4988,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; 
CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32i16.nxv32i8( @@ -5080,7 +5080,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i8( @@ -5126,7 +5126,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i8( @@ -5172,7 +5172,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i8( @@ -5218,7 +5218,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i8( @@ -5264,7 +5264,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i8( @@ -5310,7 +5310,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i8( @@ -5356,7 +5356,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i8( @@ -5402,7 +5402,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i8( @@ -5540,7 +5540,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i8( @@ -5586,7 +5586,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i8( @@ -5632,7 +5632,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i8( @@ -5678,7 +5678,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32f16.nxv32i8( @@ -5770,7 +5770,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a 
= call @llvm.riscv.vloxei.nxv2f32.nxv2i8( @@ -5816,7 +5816,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i8( @@ -5862,7 +5862,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i8( @@ -5908,7 +5908,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i8( @@ -5954,7 +5954,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i8( @@ -6000,7 +6000,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i8( @@ -6046,7 +6046,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i8( @@ -6092,7 +6092,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i8( diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll @@ -149,7 +149,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i64( @@ -287,7 +287,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i64( @@ -333,7 +333,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i64( @@ -425,7 +425,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i64( @@ -471,7 +471,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i64( @@ -517,7 +517,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: 
vloxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i64( @@ -835,7 +835,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i64( @@ -881,7 +881,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i64( @@ -973,7 +973,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i64( @@ -1019,7 +1019,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i64( @@ -1065,7 +1065,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i64( @@ -1429,7 +1429,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i32( @@ -1475,7 +1475,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i32( @@ -1613,7 +1613,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i32( @@ -1659,7 +1659,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i32( @@ -1705,7 +1705,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i32( @@ -1976,7 +1976,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i32( @@ -2022,7 +2022,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i32( @@ -2068,7 +2068,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; 
CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i32( @@ -2114,7 +2114,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i32( @@ -2252,7 +2252,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i32( @@ -2298,7 +2298,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i32( @@ -2344,7 +2344,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i32( @@ -2615,7 +2615,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i32( @@ -2661,7 +2661,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i32( @@ -2707,7 +2707,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i32( @@ -2753,7 +2753,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i32( @@ -2937,7 +2937,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vloxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i16( @@ -2983,7 +2983,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vloxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i16( @@ -3029,7 +3029,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vloxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32i8.nxv32i16( @@ -3391,7 +3391,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i16( @@ -3437,7 +3437,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vloxei.nxv4i32.nxv4i16( @@ -3483,7 +3483,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i16( @@ -3529,7 +3529,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vloxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i16( @@ -3575,7 +3575,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i16( @@ -3621,7 +3621,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i16( @@ -3667,7 +3667,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i16( @@ -3713,7 +3713,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i16( @@ -4075,7 +4075,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i16( @@ -4121,7 +4121,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i16( @@ -4167,7 +4167,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i16( @@ -4213,7 +4213,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vloxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i16( @@ -4259,7 +4259,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i16( @@ -4305,7 +4305,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i16( @@ -4351,7 +4351,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i16( @@ -4397,7 +4397,7 @@ ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i16( @@ -4850,7 +4850,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vloxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i8( @@ -4896,7 +4896,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i8( @@ -4942,7 +4942,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i8( @@ -4988,7 +4988,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32i16.nxv32i8( @@ -5080,7 +5080,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i8( @@ -5126,7 +5126,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i8( @@ -5172,7 +5172,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i8( @@ -5218,7 +5218,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i8( @@ -5264,7 +5264,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i8( @@ -5310,7 +5310,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i8( @@ -5356,7 +5356,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i8( @@ -5402,7 +5402,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i8( @@ -5540,7 +5540,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; 
CHECK-NEXT: vloxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i8( @@ -5586,7 +5586,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vloxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i8( @@ -5632,7 +5632,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vloxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i8( @@ -5678,7 +5678,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32f16.nxv32i8( @@ -5770,7 +5770,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vloxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i8( @@ -5816,7 +5816,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vloxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i8( @@ -5862,7 +5862,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vloxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i8( @@ -5908,7 +5908,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i8( @@ -5954,7 +5954,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vloxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i8( @@ -6000,7 +6000,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vloxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i8( @@ -6046,7 +6046,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vloxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i8( @@ -6092,7 +6092,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i8( diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll @@ -149,7 +149,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vluxei.nxv8i8.nxv8i64( @@ -287,7 +287,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i64( @@ -333,7 +333,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i64( @@ -425,7 +425,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i64( @@ -471,7 +471,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i64( @@ -517,7 +517,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i64( @@ -835,7 +835,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i64( @@ -881,7 +881,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i64( @@ -973,7 +973,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i64( @@ -1019,7 +1019,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i64( @@ -1065,7 +1065,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i64( @@ -1429,7 +1429,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i32( @@ -1475,7 +1475,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i32( @@ -1613,7 +1613,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i32( @@ -1659,7 +1659,7 @@ ; CHECK: # %bb.0: 
# %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i32( @@ -1705,7 +1705,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i32( @@ -1976,7 +1976,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i32( @@ -2022,7 +2022,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i32( @@ -2068,7 +2068,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i32( @@ -2114,7 +2114,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i32( @@ -2252,7 +2252,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i32( @@ -2298,7 +2298,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i32( @@ -2344,7 +2344,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i32( @@ -2615,7 +2615,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i32( @@ -2661,7 +2661,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i32( @@ -2707,7 +2707,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i32( @@ -2753,7 +2753,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i32( @@ -2937,7 +2937,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; 
CHECK-NEXT: vluxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i16( @@ -2983,7 +2983,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i16( @@ -3029,7 +3029,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vluxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32i8.nxv32i16( @@ -3391,7 +3391,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i16( @@ -3437,7 +3437,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i16( @@ -3483,7 +3483,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i16( @@ -3529,7 +3529,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i16( @@ -3575,7 +3575,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i16( @@ -3621,7 +3621,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i16( @@ -3667,7 +3667,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i16( @@ -3713,7 +3713,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i16( @@ -4075,7 +4075,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i16( @@ -4121,7 +4121,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i16( @@ -4167,7 +4167,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, 
v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i16( @@ -4213,7 +4213,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i16( @@ -4259,7 +4259,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i16( @@ -4305,7 +4305,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i16( @@ -4351,7 +4351,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i16( @@ -4397,7 +4397,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i16( @@ -4850,7 +4850,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i8( @@ -4896,7 +4896,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i8( @@ -4942,7 +4942,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i8( @@ -4988,7 +4988,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32i16.nxv32i8( @@ -5080,7 +5080,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i8( @@ -5126,7 +5126,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i8( @@ -5172,7 +5172,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i8( @@ -5218,7 +5218,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vluxei.nxv16i32.nxv16i8( @@ -5264,7 +5264,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i8( @@ -5310,7 +5310,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i8( @@ -5356,7 +5356,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i8( @@ -5402,7 +5402,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i8( @@ -5540,7 +5540,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i8( @@ -5586,7 +5586,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i8( @@ -5632,7 +5632,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i8( @@ -5678,7 +5678,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32f16.nxv32i8( @@ -5770,7 +5770,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i8( @@ -5816,7 +5816,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i8( @@ -5862,7 +5862,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i8( @@ -5908,7 +5908,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i8( @@ -5954,7 +5954,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i8( @@ -6000,7 +6000,7 @@ ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i8( @@ -6046,7 +6046,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i8( @@ -6092,7 +6092,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i8( diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll @@ -149,7 +149,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i64( @@ -287,7 +287,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i64( @@ -333,7 +333,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i64( @@ -425,7 +425,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i64( @@ -471,7 +471,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i64( @@ -517,7 +517,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i64( @@ -835,7 +835,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i64( @@ -881,7 +881,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i64( @@ -973,7 +973,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei64.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i64( @@ -1019,7 +1019,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei64.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i64( @@ -1065,7 +1065,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei64.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i64( @@ -1429,7 +1429,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i32( @@ -1475,7 +1475,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i32( @@ -1613,7 +1613,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i32( @@ -1659,7 +1659,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i32( @@ -1705,7 +1705,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i32( @@ -1976,7 +1976,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i32( @@ -2022,7 +2022,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i32( @@ -2068,7 +2068,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i32( @@ -2114,7 +2114,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i32( @@ -2252,7 +2252,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei32.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i32( @@ -2298,7 +2298,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i32( @@ -2344,7 +2344,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vluxei.nxv16f16.nxv16i32( @@ -2615,7 +2615,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei32.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i32( @@ -2661,7 +2661,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei32.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i32( @@ -2707,7 +2707,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei32.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i32( @@ -2753,7 +2753,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i32( @@ -2937,7 +2937,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vluxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i16( @@ -2983,7 +2983,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vluxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i16( @@ -3029,7 +3029,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vluxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32i8.nxv32i16( @@ -3391,7 +3391,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i16( @@ -3437,7 +3437,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i16( @@ -3483,7 +3483,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i16( @@ -3529,7 +3529,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i16( @@ -3575,7 +3575,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i16( @@ -3621,7 +3621,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i16( @@ -3667,7 +3667,7 @@ ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i16( @@ -3713,7 +3713,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i16( @@ -4075,7 +4075,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i16( @@ -4121,7 +4121,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i16( @@ -4167,7 +4167,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i16( @@ -4213,7 +4213,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i16( @@ -4259,7 +4259,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei16.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i16( @@ -4305,7 +4305,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei16.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i16( @@ -4351,7 +4351,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei16.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i16( @@ -4397,7 +4397,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i16( @@ -4850,7 +4850,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i8( @@ -4896,7 +4896,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i8( @@ -4942,7 +4942,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i8( @@ -4988,7 +4988,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, 
ta, mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32i16.nxv32i8( @@ -5080,7 +5080,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i8( @@ -5126,7 +5126,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i8( @@ -5172,7 +5172,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i8( @@ -5218,7 +5218,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i8( @@ -5264,7 +5264,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i8( @@ -5310,7 +5310,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i8( @@ -5356,7 +5356,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i8( @@ -5402,7 +5402,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i8( @@ -5540,7 +5540,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vluxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i8( @@ -5586,7 +5586,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vluxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i8( @@ -5632,7 +5632,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vluxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i8( @@ -5678,7 +5678,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32f16.nxv32i8( @@ -5770,7 +5770,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vluxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; 
CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i8( @@ -5816,7 +5816,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vluxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i8( @@ -5862,7 +5862,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vluxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i8( @@ -5908,7 +5908,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i8( @@ -5954,7 +5954,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vluxei8.v v9, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i8( @@ -6000,7 +6000,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vluxei8.v v10, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i8( @@ -6046,7 +6046,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vluxei8.v v12, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i8( @@ -6092,7 +6092,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i8( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll @@ -81,7 +81,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8( @@ -219,7 +219,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16( @@ -334,7 +334,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32( @@ -426,7 +426,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64( @@ -587,7 +587,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vmadc.carry.in.nxv8i8.i8( @@ -725,7 +725,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16( @@ -840,7 +840,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32( @@ -937,7 +937,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmadc.vvm v9, v8, v10, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: @@ -1093,7 +1093,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8( @@ -1195,7 +1195,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16( @@ -1280,7 +1280,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32( @@ -1348,7 +1348,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll @@ -81,7 +81,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8( @@ -219,7 +219,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16( @@ -334,7 +334,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32( @@ -426,7 +426,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmadc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64( @@ -587,7 +587,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8( @@ -725,7 +725,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16( @@ -840,7 +840,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32( @@ -932,7 +932,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmadc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv1i64.i64( @@ -1069,7 +1069,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8( @@ -1171,7 +1171,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16( @@ -1256,7 +1256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32( @@ -1324,7 +1324,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmadc.vim v9, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv32.ll @@ -138,9 +138,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmfeq.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv4f16( @@ -346,9 +346,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmfeq.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv2f32( @@ -502,9 +502,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmfeq.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv1f64( @@ -759,7 +759,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfeq.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfeq.mask.nxv4f16.f16( @@ -955,7 +955,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfeq.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; 
CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfeq.mask.nxv2f32.f32( @@ -1109,7 +1109,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfeq.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-rv64.ll @@ -138,9 +138,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmfeq.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv4f16( @@ -346,9 +346,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmfeq.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv2f32( @@ -502,9 +502,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmfeq.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfeq.nxv1f64( @@ -759,7 +759,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfeq.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfeq.mask.nxv4f16.f16( @@ -955,7 +955,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfeq.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfeq.mask.nxv2f32.f32( @@ -1102,7 +1102,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfeq.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfeq.mask.nxv1f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll @@ -138,9 +138,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmfle.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv4f16( @@ -346,9 +346,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmfle.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv2f32( @@ -502,9 +502,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmfle.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v 
v0, v8 ; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv1f64( @@ -759,7 +759,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfge.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfge.mask.nxv4f16.f16( @@ -955,7 +955,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfge.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfge.mask.nxv2f32.f32( @@ -1109,7 +1109,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfge.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll @@ -138,9 +138,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmfle.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv4f16( @@ -346,9 +346,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmfle.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv2f32( @@ -502,9 +502,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmfle.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfge.nxv1f64( @@ -759,7 +759,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfge.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfge.mask.nxv4f16.f16( @@ -955,7 +955,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfge.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfge.mask.nxv2f32.f32( @@ -1102,7 +1102,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfge.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfge.mask.nxv1f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll @@ -138,9 +138,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmflt.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: 
vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv4f16( @@ -346,9 +346,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmflt.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv2f32( @@ -502,9 +502,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmflt.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv1f64( @@ -759,7 +759,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfgt.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfgt.mask.nxv4f16.f16( @@ -955,7 +955,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfgt.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfgt.mask.nxv2f32.f32( @@ -1109,7 +1109,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfgt.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll @@ -138,9 +138,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmflt.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv4f16( @@ -346,9 +346,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmflt.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv2f32( @@ -502,9 +502,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmflt.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfgt.nxv1f64( @@ -759,7 +759,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfgt.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfgt.mask.nxv4f16.f16( @@ -955,7 +955,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfgt.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfgt.mask.nxv2f32.f32( @@ -1102,7 +1102,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfgt.vf v10, 
v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfgt.mask.nxv1f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv32.ll @@ -138,9 +138,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmfle.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv4f16( @@ -346,9 +346,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmfle.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv2f32( @@ -502,9 +502,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmfle.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv1f64( @@ -759,7 +759,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfle.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfle.mask.nxv4f16.f16( @@ -955,7 +955,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfle.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfle.mask.nxv2f32.f32( @@ -1109,7 +1109,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfle.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-rv64.ll @@ -138,9 +138,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmfle.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv4f16( @@ -346,9 +346,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmfle.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv2f32( @@ -502,9 +502,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmfle.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfle.nxv1f64( @@ -759,7 +759,7 @@ ; CHECK-NEXT: vsetvli zero, 
a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfle.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfle.mask.nxv4f16.f16( @@ -955,7 +955,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfle.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfle.mask.nxv2f32.f32( @@ -1102,7 +1102,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfle.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfle.mask.nxv1f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv32.ll @@ -138,9 +138,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmflt.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv4f16( @@ -346,9 +346,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmflt.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv2f32( @@ -502,9 +502,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmflt.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv1f64( @@ -759,7 +759,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmflt.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmflt.mask.nxv4f16.f16( @@ -955,7 +955,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmflt.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmflt.mask.nxv2f32.f32( @@ -1109,7 +1109,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmflt.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-rv64.ll @@ -138,9 +138,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmflt.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv4f16( @@ -346,9 +346,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmflt.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: 
vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv2f32( @@ -502,9 +502,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmflt.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmflt.nxv1f64( @@ -759,7 +759,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmflt.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmflt.mask.nxv4f16.f16( @@ -955,7 +955,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmflt.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmflt.mask.nxv2f32.f32( @@ -1102,7 +1102,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmflt.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmflt.mask.nxv1f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv32.ll @@ -138,9 +138,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmfne.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv4f16( @@ -346,9 +346,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmfne.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv2f32( @@ -502,9 +502,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmfne.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv1f64( @@ -759,7 +759,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfne.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfne.mask.nxv4f16.f16( @@ -955,7 +955,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfne.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfne.mask.nxv2f32.f32( @@ -1109,7 +1109,7 @@ ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfne.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll 
b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-rv64.ll @@ -138,9 +138,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmfne.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv4f16( @@ -346,9 +346,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmfne.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv2f32( @@ -502,9 +502,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmfne.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmfne.nxv1f64( @@ -759,7 +759,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfne.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfne.mask.nxv4f16.f16( @@ -955,7 +955,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfne.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfne.mask.nxv2f32.f32( @@ -1102,7 +1102,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmfne.vf v10, v8, ft0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmfne.mask.nxv1f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll @@ -81,7 +81,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8( @@ -219,7 +219,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16( @@ -334,7 +334,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32( @@ -426,7 +426,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64( @@ -587,7 +587,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; 
CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8( @@ -725,7 +725,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16( @@ -840,7 +840,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32( @@ -937,7 +937,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v10, (a0), zero ; CHECK-NEXT: vmsbc.vvm v9, v8, v10, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll @@ -81,7 +81,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8( @@ -219,7 +219,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16( @@ -334,7 +334,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32( @@ -426,7 +426,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsbc.vvm v10, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64( @@ -587,7 +587,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8( @@ -725,7 +725,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16( @@ -840,7 +840,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32( @@ -932,7 +932,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmsbc.vxm v9, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmv.v.v v0, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsbf.m v8, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbf.nxv8i1( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsbf.m v8, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbf.nxv8i1( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv32.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmseq.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmseq.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmseq.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmseq.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv1i64( @@ -1112,7 +1112,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmseq.mask.nxv8i8.i8( @@ -1347,7 +1347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmseq.mask.nxv4i16.i16( @@ -1535,7 +1535,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmseq.mask.nxv2i32.i32( @@ -1687,7 +1687,7 @@ ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vv v10, v8, v11, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: @@ -1946,7 +1946,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vmseq.mask.nxv8i8.i8( @@ -2121,7 +2121,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmseq.mask.nxv4i16.i16( @@ -2261,7 +2261,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmseq.mask.nxv2i32.i32( @@ -2366,7 +2366,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmseq.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmseq-rv64.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmseq.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmseq.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmseq.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmseq.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmseq.nxv1i64( @@ -1112,7 +1112,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmseq.mask.nxv8i8.i8( @@ -1347,7 +1347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmseq.mask.nxv4i16.i16( @@ -1535,7 +1535,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmseq.mask.nxv2i32.i32( @@ -1676,7 +1676,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vmseq.mask.nxv1i64.i64( @@ -1910,7 +1910,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmseq.mask.nxv8i8.i8( @@ -2085,7 +2085,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmseq.mask.nxv4i16.i16( @@ -2225,7 +2225,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmseq.mask.nxv2i32.i32( @@ -2330,7 +2330,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmseq.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsge.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsle.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsge.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsle.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsge.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsle.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsge.nxv1i64( @@ -1702,7 +1702,7 @@ ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vv v10, v11, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: @@ -1961,7 +1961,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vi v10, v8, -9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsge.mask.nxv8i8.i8( @@ -2136,7 +2136,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vi v10, v8, 0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsge.mask.nxv4i16.i16( @@ -2276,7 
+2276,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vi v10, v8, 8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsge.mask.nxv2i32.i32( @@ -2381,7 +2381,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vi v10, v8, 8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsge.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsge.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsle.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsge.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsle.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsge.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsle.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsge.nxv1i64( @@ -1928,7 +1928,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vi v10, v8, -9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsge.mask.nxv8i8.i8( @@ -2103,7 +2103,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vi v10, v8, 0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsge.mask.nxv4i16.i16( @@ -2243,7 +2243,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vi v10, v8, 8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsge.mask.nxv2i32.i32( @@ -2348,7 +2348,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vi v10, v8, 14, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsge.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v8, v9, 
v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv1i64( @@ -1702,7 +1702,7 @@ ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vv v10, v11, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: @@ -1961,7 +1961,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v10, v8, -9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8( @@ -2136,7 +2136,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v10, v8, 0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16( @@ -2276,7 +2276,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v10, v8, 8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32( @@ -2381,7 +2381,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v10, v8, 14, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; 
CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgeu.nxv1i64( @@ -1928,7 +1928,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v10, v8, -9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8( @@ -2103,7 +2103,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v10, v8, 0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16( @@ -2243,7 +2243,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v10, v8, 8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32( @@ -2348,7 +2348,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v10, v8, 14, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmslt.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmslt.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmslt.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmslt.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv1i64( @@ -1112,7 +1112,7 @@ ; 
CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.mask.nxv8i8.i8( @@ -1347,7 +1347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.mask.nxv4i16.i16( @@ -1535,7 +1535,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.mask.nxv2i32.i32( @@ -1687,7 +1687,7 @@ ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vv v10, v11, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: @@ -1946,7 +1946,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.mask.nxv8i8.i8( @@ -2121,7 +2121,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.mask.nxv4i16.i16( @@ -2261,7 +2261,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.mask.nxv2i32.i32( @@ -2366,7 +2366,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmslt.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmslt.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmslt.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmslt.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 
+; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgt.nxv1i64( @@ -1112,7 +1112,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.mask.nxv8i8.i8( @@ -1347,7 +1347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.mask.nxv4i16.i16( @@ -1535,7 +1535,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.mask.nxv2i32.i32( @@ -1676,7 +1676,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.mask.nxv1i64.i64( @@ -1910,7 +1910,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.mask.nxv8i8.i8( @@ -2085,7 +2085,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.mask.nxv4i16.i16( @@ -2225,7 +2225,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.mask.nxv2i32.i32( @@ -2330,7 +2330,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgt.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgtu.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgtu.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t -; CHECK-NEXT: 
vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgtu.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgtu.nxv1i64( @@ -1112,7 +1112,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8( @@ -1347,7 +1347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.mask.nxv4i16.i16( @@ -1535,7 +1535,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32( @@ -1687,7 +1687,7 @@ ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vv v10, v11, v8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: @@ -1946,7 +1946,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8( @@ -2121,7 +2121,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.mask.nxv4i16.i16( @@ -2261,7 +2261,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32( @@ -2366,7 +2366,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgtu.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call 
@llvm.riscv.vmsgtu.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgtu.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v8, v9, v8 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsgtu.nxv1i64( @@ -1112,7 +1112,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8( @@ -1347,7 +1347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.mask.nxv4i16.i16( @@ -1535,7 +1535,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32( @@ -1676,7 +1676,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.mask.nxv1i64.i64( @@ -1910,7 +1910,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.mask.nxv8i8.i8( @@ -2085,7 +2085,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.mask.nxv4i16.i16( @@ -2225,7 +2225,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.mask.nxv2i32.i32( @@ -2330,7 +2330,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsgtu.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsif.m v8, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsif.nxv8i1( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsif.m v8, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsif.nxv8i1( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv32.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsle.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsle.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsle.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsle.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsle.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsle.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsle.nxv1i64( @@ -1112,7 +1112,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsle.mask.nxv8i8.i8( @@ -1347,7 +1347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsle.mask.nxv4i16.i16( @@ -1535,7 +1535,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsle.mask.nxv2i32.i32( @@ -1687,7 +1687,7 @@ ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vv v10, v8, v11, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: @@ -1946,7 +1946,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsle.mask.nxv8i8.i8( @@ -2121,7 +2121,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsle.mask.nxv4i16.i16( @@ -2261,7 +2261,7 @@ ; 
CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsle.mask.nxv2i32.i32( @@ -2366,7 +2366,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsle.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsle-rv64.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsle.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsle.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsle.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsle.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsle.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsle.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsle.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsle.nxv1i64( @@ -1112,7 +1112,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsle.mask.nxv8i8.i8( @@ -1347,7 +1347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsle.mask.nxv4i16.i16( @@ -1535,7 +1535,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsle.mask.nxv2i32.i32( @@ -1676,7 +1676,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsle.mask.nxv1i64.i64( @@ -1910,7 +1910,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsle.mask.nxv8i8.i8( @@ -2085,7 +2085,7 @@ ; CHECK-NEXT: 
vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsle.mask.nxv4i16.i16( @@ -2225,7 +2225,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsle.mask.nxv2i32.i32( @@ -2330,7 +2330,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsle.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv32.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv1i64( @@ -1112,7 +1112,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.mask.nxv8i8.i8( @@ -1347,7 +1347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.mask.nxv4i16.i16( @@ -1535,7 +1535,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.mask.nxv2i32.i32( @@ -1687,7 +1687,7 @@ ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vv v10, v8, v11, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: @@ -1946,7 +1946,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, 
m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.mask.nxv8i8.i8( @@ -2121,7 +2121,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.mask.nxv4i16.i16( @@ -2261,7 +2261,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.mask.nxv2i32.i32( @@ -2366,7 +2366,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu-rv64.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsleu.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsleu.nxv1i64( @@ -1112,7 +1112,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.mask.nxv8i8.i8( @@ -1347,7 +1347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.mask.nxv4i16.i16( @@ -1535,7 +1535,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.mask.nxv2i32.i32( @@ -1676,7 +1676,7 @@ ; CHECK-NEXT: vsetvli 
zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.mask.nxv1i64.i64( @@ -1910,7 +1910,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.mask.nxv8i8.i8( @@ -2085,7 +2085,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.mask.nxv4i16.i16( @@ -2225,7 +2225,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.mask.nxv2i32.i32( @@ -2330,7 +2330,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsleu.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmslt.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmslt.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmslt.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmslt.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv1i64( @@ -1112,7 +1112,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmslt.mask.nxv8i8.i8( @@ -1347,7 +1347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmslt.mask.nxv4i16.i16( @@ -1535,7 +1535,7 @@ ; CHECK-NEXT: vsetvli zero, 
a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmslt.mask.nxv2i32.i32( @@ -1687,7 +1687,7 @@ ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vv v10, v8, v11, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: @@ -1946,7 +1946,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v10, v8, -9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmslt.mask.nxv8i8.i8( @@ -2121,7 +2121,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v10, v8, 0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmslt.mask.nxv4i16.i16( @@ -2261,7 +2261,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v10, v8, 8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmslt.mask.nxv2i32.i32( @@ -2366,7 +2366,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v10, v8, 8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmslt.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmslt.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmslt.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmslt.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmslt.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmslt.nxv1i64( @@ -1112,7 +1112,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmslt.mask.nxv8i8.i8( @@ -1347,7 +1347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmslt.mask.nxv4i16.i16( @@ -1535,7 +1535,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmslt.mask.nxv2i32.i32( @@ -1676,7 +1676,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmslt.mask.nxv1i64.i64( @@ -1910,7 +1910,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v10, v8, -9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmslt.mask.nxv8i8.i8( @@ -2085,7 +2085,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v10, v8, 0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmslt.mask.nxv4i16.i16( @@ -2225,7 +2225,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v10, v8, 8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmslt.mask.nxv2i32.i32( @@ -2330,7 +2330,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsle.vi v10, v8, 14, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmslt.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv1i64( @@ -1112,7 +1112,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, 
v9 ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.mask.nxv8i8.i8( @@ -1347,7 +1347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.mask.nxv4i16.i16( @@ -1535,7 +1535,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.mask.nxv2i32.i32( @@ -1687,7 +1687,7 @@ ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vv v10, v8, v11, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: @@ -1946,7 +1946,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v10, v8, -9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.mask.nxv8i8.i8( @@ -2121,7 +2121,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v10, v8, 0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.mask.nxv4i16.i16( @@ -2261,7 +2261,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v10, v8, 8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.mask.nxv2i32.i32( @@ -2366,7 +2366,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v10, v8, 14, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsltu.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; 
CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsltu.nxv1i64( @@ -1112,7 +1112,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.mask.nxv8i8.i8( @@ -1347,7 +1347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.mask.nxv4i16.i16( @@ -1535,7 +1535,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.mask.nxv2i32.i32( @@ -1676,7 +1676,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.mask.nxv1i64.i64( @@ -1910,7 +1910,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v10, v8, -9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.mask.nxv8i8.i8( @@ -2085,7 +2085,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v10, v8, 0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.mask.nxv4i16.i16( @@ -2225,7 +2225,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v10, v8, 8, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.mask.nxv2i32.i32( @@ -2330,7 +2330,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsleu.vi v10, v8, 14, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsltu.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv32.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsne.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsne.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsne.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsne.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsne.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; 
CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsne.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsne.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsne.nxv1i64( @@ -1112,7 +1112,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsne.mask.nxv8i8.i8( @@ -1347,7 +1347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsne.mask.nxv4i16.i16( @@ -1535,7 +1535,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsne.mask.nxv2i32.i32( @@ -1687,7 +1687,7 @@ ; CHECK-NEXT: vmv1r.v v10, v0 ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsne.vv v10, v8, v11, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: @@ -1946,7 +1946,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsne.mask.nxv8i8.i8( @@ -2121,7 +2121,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsne.mask.nxv4i16.i16( @@ -2261,7 +2261,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsne.mask.nxv2i32.i32( @@ -2366,7 +2366,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsne.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsne-rv64.ll @@ -190,9 +190,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsne.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsne.nxv8i8( @@ -450,9 +450,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmsne.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsne.nxv4i16( @@ -658,9 +658,9 @@ ; CHECK-NEXT: vsetvli 
zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmsne.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsne.nxv2i32( @@ -814,9 +814,9 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmsne.vv v8, v8, v9 ; CHECK-NEXT: vmv1r.v v11, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t -; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: vmv.v.v v0, v11 ; CHECK-NEXT: ret entry: %mask = call @llvm.riscv.vmsne.nxv1i64( @@ -1112,7 +1112,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsne.mask.nxv8i8.i8( @@ -1347,7 +1347,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsne.mask.nxv4i16.i16( @@ -1535,7 +1535,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsne.mask.nxv2i32.i32( @@ -1676,7 +1676,7 @@ ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsne.mask.nxv1i64.i64( @@ -1910,7 +1910,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsne.mask.nxv8i8.i8( @@ -2085,7 +2085,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsne.mask.nxv4i16.i16( @@ -2225,7 +2225,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsne.mask.nxv2i32.i32( @@ -2330,7 +2330,7 @@ ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vmv1r.v v0, v9 ; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t -; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmv.v.v v0, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsne.mask.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsof.m v8, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsof.nxv8i1( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vmsof.m v8, v0 -; CHECK-NEXT: vmv1r.v v0, v8 +; CHECK-NEXT: vmv.v.v v0, v8 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsof.nxv8i1( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir @@ -0,0 +1,301 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -verify-machineinstrs -mtriple riscv64 -run-pass=postrapseudos %s -o - | FileCheck %s + +... +--- +name: copy_different_lmul +tracksRegLiveness: true +body: | + bb.0: + liveins: $x14, $x16 + ; 82 = e32,m4 + ; CHECK-LABEL: name: copy_different_lmul + ; CHECK: liveins: $x14, $x16 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v12m2 = PseudoVMV2R_V $v28m2 + $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype + $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + $v12m2 = COPY $v28m2 +... +--- +name: copy_convert_to_vmv_v_v +tracksRegLiveness: true +body: | + bb.0: + liveins: $x14, $x16 + ; 82 = e32,m4 + ; CHECK-LABEL: name: copy_convert_to_vmv_v_v + ; CHECK: liveins: $x14, $x16 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v12m4 = PseudoVMV_V_V_M4 $v28m4, $noreg, 5, implicit $vl, implicit $vtype + $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype + $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + $v12m4 = COPY $v28m4 +... +--- +name: copy_convert_to_vmv_v_i +tracksRegLiveness: true +body: | + bb.0: + liveins: $x14 + ; 82 = e32,m4 + ; CHECK-LABEL: name: copy_convert_to_vmv_v_i + ; CHECK: liveins: $x14 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v12m4 = PseudoVMV_V_I_M4 0, $noreg, 5, implicit $vl, implicit $vtype + $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype + $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5, implicit $vl, implicit $vtype + $v12m4 = COPY $v28m4 +... +--- +name: copy_from_whole_load_store +tracksRegLiveness: true +body: | + bb.0: + liveins: $x14, $x16 + ; 82 = e32,m4 + ; CHECK-LABEL: name: copy_from_whole_load_store + ; CHECK: liveins: $x14, $x16 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v28m4 = VL4RE32_V $x16 + ; CHECK-NEXT: $v12m4 = PseudoVMV4R_V $v28m4 + $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype + $v28m4 = VL4RE32_V $x16 + $v12m4 = COPY $v28m4 +... 
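+# The fault-only-first load below carries an implicit-def of $vl, so the
+# trailing COPY must stay a whole-register move rather than become a vmv.v.v.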
+--- +name: copy_with_vleff +tracksRegLiveness: true +body: | + bb.0: + liveins: $x14, $x16 + ; 82 = e32,m4 + ; CHECK-LABEL: name: copy_with_vleff + ; CHECK: liveins: $x14, $x16 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v4m4 = PseudoVLE32FF_V_M4 $x16, $noreg, 5, implicit-def $vl + ; CHECK-NEXT: $v12m4 = PseudoVMV4R_V $v28m4 + $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype + $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5, implicit $vl, implicit $vtype + $v4m4 = PseudoVLE32FF_V_M4 $x16, $noreg, 5, implicit-def $vl + $v12m4 = COPY $v28m4 +... +--- +name: copy_with_vsetvl_x0_x0_1 +tracksRegLiveness: true +body: | + bb.0: + liveins: $x14, $x16, $x17, $x18 + ; 82 = e32,m4 + ; 73 = e16,m2 + ; CHECK-LABEL: name: copy_with_vsetvl_x0_x0_1 + ; CHECK: liveins: $x14, $x16, $x17, $x18 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x17, 73, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 $x18, $noreg, 4, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 82, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v4m4 = PseudoVLE32_V_M4 killed $x18, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v12m4 = PseudoVMV4R_V $v28m4 + $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype + $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + $x15 = PseudoVSETVLI $x17, 73, implicit-def $vl, implicit-def $vtype + $v0m2 = PseudoVLE32_V_M2 $x18, $noreg, 4, implicit $vl, implicit $vtype + $x0 = PseudoVSETVLIX0 $x0, 82, implicit-def $vl, implicit-def $vtype + $v4m4 = PseudoVLE32_V_M4 killed $x18, $noreg, 5, implicit $vl, implicit $vtype + $v12m4 = COPY $v28m4 +... +--- +name: copy_with_vsetvl_x0_x0_2 +tracksRegLiveness: true +body: | + bb.0: + liveins: $x14, $x16, $x17, $x18 + ; 82 = e32,m4 + ; 73 = e16,m2 + ; CHECK-LABEL: name: copy_with_vsetvl_x0_x0_2 + ; CHECK: liveins: $x14, $x16, $x17, $x18 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 73, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 $x18, $noreg, 4, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 82, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v4m4 = PseudoVLE32_V_M4 killed $x18, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v12m4 = PseudoVMV_V_V_M4 $v28m4, $noreg, 5, implicit $vl, implicit $vtype + $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype + $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + $x0 = PseudoVSETVLIX0 $x0, 73, implicit-def $vl, implicit-def $vtype + $v0m2 = PseudoVLE32_V_M2 $x18, $noreg, 4, implicit $vl, implicit $vtype + $x0 = PseudoVSETVLIX0 $x0, 82, implicit-def $vl, implicit-def $vtype + $v4m4 = PseudoVLE32_V_M4 killed $x18, $noreg, 5, implicit $vl, implicit $vtype + $v12m4 = COPY $v28m4 +... 
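+# The last vsetvli before the COPY selects e16,m2, which does not match the
+# m4 source register, so the whole-register move is kept.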
+--- +name: copy_with_vsetvl_x0_x0_3 +tracksRegLiveness: true +body: | + bb.0: + liveins: $x14, $x16, $x17, $x18 + ; 82 = e32,m4 + ; 73 = e16,m2 + ; CHECK-LABEL: name: copy_with_vsetvl_x0_x0_3 + ; CHECK: liveins: $x14, $x16, $x17, $x18 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 73, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 $x18, $noreg, 4, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v12m4 = PseudoVMV4R_V $v28m4 + $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype + $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + $x0 = PseudoVSETVLIX0 $x0, 73, implicit-def $vl, implicit-def $vtype + $v0m2 = PseudoVLE32_V_M2 $x18, $noreg, 4, implicit $vl, implicit $vtype + $v12m4 = COPY $v28m4 +... +--- +name: copy_subregister +tracksRegLiveness: true +body: | + bb.0: + liveins: $x16, $x17 + ; 73 = e16,m2 + ; CHECK-LABEL: name: copy_subregister + ; CHECK: liveins: $x16, $x17 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x15 = PseudoVSETIVLI 4, 73, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v26m2 = PseudoVLE16_V_M2 killed $x16, $noreg, 4, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v8m2 = PseudoVLE16_V_M2 killed $x17, $noreg, 4, implicit $vl, implicit $vtype + ; CHECK-NEXT: early-clobber $v28m4 = PseudoVWADD_VV_M2 $v26m2, $v8m2, $noreg, 4, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v12m2 = PseudoVMV2R_V $v28m2 + $x15 = PseudoVSETIVLI 4, 73, implicit-def $vl, implicit-def $vtype + $v26m2 = PseudoVLE16_V_M2 killed $x16, $noreg, 4, implicit $vl, implicit $vtype + $v8m2 = PseudoVLE16_V_M2 killed $x17, $noreg, 4, implicit $vl, implicit $vtype + $v28m4 = PseudoVWADD_VV_M2 $v26m2, $v8m2, $noreg, 4, implicit $vl, implicit $vtype + $v12m2 = COPY $v28m2 +... +--- +name: copy_from_reload +tracksRegLiveness: true +body: | + bb.0: + liveins: $x14, $x16, $x17 + ; 73 = e16,m2 + ; CHECK-LABEL: name: copy_from_reload + ; CHECK: liveins: $x14, $x16, $x17 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 73, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v2m2 = PseudoVLE16_V_M2 killed $x16, $noreg, 4, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x12 = PseudoReadVLENB + ; CHECK-NEXT: $x12 = SLLI $x12, 1 + ; CHECK-NEXT: $v2m2_v4m2 = PseudoVRELOAD2_M2 killed $x17, killed $x12 + ; CHECK-NEXT: $v12m2 = PseudoVMV2R_V $v2m2 + $x15 = PseudoVSETVLI $x14, 73, implicit-def $vl, implicit-def $vtype + $v2m2 = PseudoVLE16_V_M2 killed $x16, $noreg, 4, implicit $vl, implicit $vtype + $x12 = PseudoReadVLENB + $x12 = SLLI $x12, 1 + $v2m2_v4m2 = PseudoVRELOAD2_M2 killed $x17, killed $x12 + $v12m2 = COPY $v2m2 +... 
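+# e16,m4 and e32,m4 imply different VLMAX values, so the COPY below is not
+# converted and remains a vmv4r.v.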
+--- +name: copy_with_different_vlmax +tracksRegLiveness: true +body: | + bb.0: + liveins: $x14, $x16 + ; 82 = e32,m4 + ; 74 = e16,m4 + ; CHECK-LABEL: name: copy_with_different_vlmax + ; CHECK: liveins: $x14, $x16 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 74, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v12m4 = PseudoVMV4R_V $v28m4 + $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype + $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + $x0 = PseudoVSETVLIX0 $x0, 74, implicit-def $vl, implicit-def $vtype + $v12m4 = COPY $v28m4 +... +--- +name: copy_with_widening_reduction +tracksRegLiveness: true +body: | + bb.0: + liveins: $x10, $v8, $v26, $v27 + ; CHECK-LABEL: name: copy_with_widening_reduction + ; CHECK: liveins: $x10, $v8, $v26, $v27 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x11 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v8 = PseudoVWREDSUM_VS_M1 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v26 = PseudoVMV1R_V killed $v8 + ; CHECK-NEXT: $x10 = PseudoVSETVLI killed renamable $x10, 75, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v8m8 = PseudoVRELOAD_M8 killed $x10 + $x11 = PseudoVSETIVLI 1, 64, implicit-def $vl, implicit-def $vtype + $v8 = PseudoVWREDSUM_VS_M1 killed renamable $v8, killed renamable $v26, killed renamable $v27, 1, 3, implicit $vl, implicit $vtype + $v26 = COPY killed renamable $v8 + $x10 = PseudoVSETVLI killed renamable $x10, 75, implicit-def $vl, implicit-def $vtype + $v8m8 = PseudoVRELOAD_M8 killed $x10 +... +--- +name: copy_zvlsseg_reg +tracksRegLiveness: true +body: | + bb.0: + liveins: $x14, $x16 + ; 80 = e32,m1 + ; CHECK-LABEL: name: copy_zvlsseg_reg + ; CHECK: liveins: $x14, $x16 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v10 = PseudoVMV1R_V $v8 + $x15 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype + $v8_v9 = PseudoVLSEG2E32_V_M1 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + $v10 = COPY $v8 +... +--- +name: copy_zvlsseg_reg_2 +tracksRegLiveness: true +body: | + bb.0: + liveins: $x14, $x16 + ; 80 = e32,m1 + ; CHECK-LABEL: name: copy_zvlsseg_reg_2 + ; CHECK: liveins: $x14, $x16 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v10 = PseudoVMV_V_V_M1 $v8, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v11 = PseudoVMV_V_V_M1 $v9, $noreg, 5, implicit $vl, implicit $vtype + $x15 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype + $v8_v9 = PseudoVLSEG2E32_V_M1 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + $v10_v11 = COPY $v8_v9 +... 
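+# Copies of fractional-LMUL (mf2) values are still expanded to whole-register
+# moves.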
+--- +name: copy_fractional_lmul +tracksRegLiveness: true +body: | + bb.0: + liveins: $x14, $x16 + ; 87 = e32,mf2 + ; CHECK-LABEL: name: copy_fractional_lmul + ; CHECK: liveins: $x14, $x16 + ; CHECK-NEXT: {{ $}} + ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 87, implicit-def $vl, implicit-def $vtype + ; CHECK-NEXT: $v28 = PseudoVLE32_V_MF2 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + ; CHECK-NEXT: $v12 = PseudoVMV1R_V $v28 + $x15 = PseudoVSETVLI $x14, 87, implicit-def $vl, implicit-def $vtype + $v28 = PseudoVLE32_V_MF2 killed $x16, $noreg, 5, implicit $vl, implicit $vtype + $v12 = COPY $v28 +... diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll @@ -146,7 +146,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnclip.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8( @@ -192,7 +192,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnclip.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8( @@ -238,7 +238,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnclip.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8( @@ -374,7 +374,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnclip.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16( @@ -420,7 +420,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnclip.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16( @@ -466,7 +466,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnclip.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16( @@ -557,7 +557,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnclip.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32( @@ -603,7 +603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnclip.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32( @@ -649,7 +649,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnclip.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32( @@ -830,7 +830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vnclip.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16( @@ -876,7 +876,7 @@ ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vnclip.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16( @@ -922,7 +922,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vnclip.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16( @@ -1058,7 +1058,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vnclip.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32( @@ -1104,7 +1104,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vnclip.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32( @@ -1150,7 +1150,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vnclip.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32( @@ -1241,7 +1241,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vnclip.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64( @@ -1287,7 +1287,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vnclip.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64( @@ -1333,7 +1333,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vnclip.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64( @@ -1470,7 +1470,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnclip.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16( @@ -1503,7 +1503,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnclip.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16( @@ -1536,7 +1536,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnclip.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16( @@ -1633,7 +1633,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnclip.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32( @@ -1666,7 +1666,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnclip.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32( @@ -1699,7 +1699,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnclip.wi v16, 
v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32( @@ -1764,7 +1764,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnclip.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64( @@ -1797,7 +1797,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnclip.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64( @@ -1830,7 +1830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnclip.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll @@ -146,7 +146,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnclip.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8( @@ -192,7 +192,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnclip.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8( @@ -238,7 +238,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnclip.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8( @@ -374,7 +374,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnclip.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16( @@ -420,7 +420,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnclip.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16( @@ -466,7 +466,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnclip.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16( @@ -557,7 +557,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnclip.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32( @@ -603,7 +603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnclip.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32( @@ -649,7 +649,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnclip.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32( @@ -830,7 +830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vnclip.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16( @@ -876,7 +876,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vnclip.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16( @@ -922,7 +922,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vnclip.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16( @@ -1058,7 +1058,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vnclip.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32( @@ -1104,7 +1104,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vnclip.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32( @@ -1150,7 +1150,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vnclip.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32( @@ -1241,7 +1241,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vnclip.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64( @@ -1287,7 +1287,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vnclip.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64( @@ -1333,7 +1333,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vnclip.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64( @@ -1470,7 +1470,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnclip.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16( @@ -1503,7 +1503,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnclip.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16( @@ -1536,7 +1536,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnclip.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16( @@ -1633,7 +1633,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnclip.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32( @@ -1666,7 +1666,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnclip.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32( @@ -1699,7 +1699,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnclip.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32( @@ -1764,7 +1764,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnclip.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64( @@ -1797,7 +1797,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnclip.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64( @@ -1830,7 +1830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnclip.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll @@ -146,7 +146,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnclipu.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8( @@ -192,7 +192,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnclipu.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8( @@ -238,7 +238,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnclipu.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8( @@ -374,7 +374,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnclipu.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16( @@ -420,7 +420,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnclipu.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16( @@ -466,7 +466,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnclipu.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16( @@ -557,7 +557,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnclipu.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32( @@ -603,7 +603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnclipu.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, 
v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32( @@ -649,7 +649,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnclipu.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32( @@ -830,7 +830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vnclipu.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16( @@ -876,7 +876,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vnclipu.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16( @@ -922,7 +922,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vnclipu.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16( @@ -1058,7 +1058,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vnclipu.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32( @@ -1104,7 +1104,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vnclipu.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32( @@ -1150,7 +1150,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vnclipu.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32( @@ -1241,7 +1241,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vnclipu.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64( @@ -1287,7 +1287,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vnclipu.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64( @@ -1333,7 +1333,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vnclipu.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64( @@ -1470,7 +1470,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnclipu.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16( @@ -1503,7 +1503,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnclipu.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16( @@ -1536,7 +1536,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnclipu.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = 
call @llvm.riscv.vnclipu.nxv32i8.nxv32i16( @@ -1633,7 +1633,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnclipu.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32( @@ -1666,7 +1666,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnclipu.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32( @@ -1699,7 +1699,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnclipu.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32( @@ -1764,7 +1764,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnclipu.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64( @@ -1797,7 +1797,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnclipu.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64( @@ -1830,7 +1830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnclipu.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll @@ -146,7 +146,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnclipu.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8( @@ -192,7 +192,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnclipu.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8( @@ -238,7 +238,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnclipu.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8( @@ -374,7 +374,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnclipu.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16( @@ -420,7 +420,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnclipu.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16( @@ -466,7 +466,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnclipu.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16( @@ -557,7 +557,7 @@ ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnclipu.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32( @@ -603,7 +603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnclipu.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32( @@ -649,7 +649,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnclipu.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32( @@ -830,7 +830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vnclipu.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16( @@ -876,7 +876,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vnclipu.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16( @@ -922,7 +922,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vnclipu.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16( @@ -1058,7 +1058,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vnclipu.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32( @@ -1104,7 +1104,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vnclipu.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32( @@ -1150,7 +1150,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vnclipu.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32( @@ -1241,7 +1241,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vnclipu.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64( @@ -1287,7 +1287,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vnclipu.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64( @@ -1333,7 +1333,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vnclipu.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64( @@ -1470,7 +1470,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnclipu.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16( @@ -1503,7 +1503,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; 
CHECK-NEXT: vnclipu.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16( @@ -1536,7 +1536,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnclipu.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16( @@ -1633,7 +1633,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnclipu.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32( @@ -1666,7 +1666,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnclipu.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32( @@ -1699,7 +1699,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnclipu.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32( @@ -1764,7 +1764,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnclipu.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64( @@ -1797,7 +1797,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnclipu.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64( @@ -1830,7 +1830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnclipu.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll @@ -146,7 +146,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnsra.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8( @@ -192,7 +192,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnsra.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8( @@ -238,7 +238,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnsra.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8( @@ -374,7 +374,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnsra.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16( @@ -420,7 +420,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnsra.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16( @@ -466,7 +466,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnsra.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16( @@ -557,7 +557,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnsra.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32( @@ -603,7 +603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnsra.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32( @@ -649,7 +649,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnsra.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32( @@ -830,7 +830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vnsra.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16( @@ -876,7 +876,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vnsra.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16( @@ -922,7 +922,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vnsra.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16( @@ -1058,7 +1058,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vnsra.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32( @@ -1104,7 +1104,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vnsra.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32( @@ -1150,7 +1150,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vnsra.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32( @@ -1241,7 +1241,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vnsra.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64( @@ -1287,7 +1287,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vnsra.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64( @@ -1333,7 +1333,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vnsra.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64( @@ -1470,7 +1470,7 @@ ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnsra.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16( @@ -1503,7 +1503,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnsra.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16( @@ -1536,7 +1536,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnsra.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16( @@ -1633,7 +1633,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnsra.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32( @@ -1666,7 +1666,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnsra.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32( @@ -1699,7 +1699,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnsra.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32( @@ -1764,7 +1764,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnsra.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64( @@ -1797,7 +1797,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnsra.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64( @@ -1830,7 +1830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnsra.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll @@ -146,7 +146,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnsra.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8( @@ -192,7 +192,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnsra.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8( @@ -238,7 +238,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnsra.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8( @@ -374,7 +374,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnsra.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16( @@ -420,7 +420,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnsra.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16( @@ -466,7 +466,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnsra.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16( @@ -557,7 +557,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnsra.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32( @@ -603,7 +603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnsra.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32( @@ -649,7 +649,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnsra.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32( @@ -830,7 +830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vnsra.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16( @@ -876,7 +876,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vnsra.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16( @@ -922,7 +922,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vnsra.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16( @@ -1058,7 +1058,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vnsra.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32( @@ -1104,7 +1104,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vnsra.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32( @@ -1150,7 +1150,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vnsra.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32( @@ -1241,7 +1241,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vnsra.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64( @@ -1287,7 +1287,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vnsra.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64( @@ -1333,7 +1333,7 @@ ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vnsra.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64( @@ -1470,7 +1470,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnsra.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16( @@ -1503,7 +1503,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnsra.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16( @@ -1536,7 +1536,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnsra.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16( @@ -1633,7 +1633,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnsra.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32( @@ -1666,7 +1666,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnsra.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32( @@ -1699,7 +1699,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnsra.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32( @@ -1764,7 +1764,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnsra.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64( @@ -1797,7 +1797,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnsra.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64( @@ -1830,7 +1830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnsra.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll @@ -146,7 +146,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8( @@ -192,7 +192,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnsrl.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8( @@ -238,7 +238,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnsrl.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8( @@ -374,7 +374,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnsrl.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16( @@ -420,7 +420,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16( @@ -466,7 +466,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnsrl.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16( @@ -557,7 +557,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnsrl.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32( @@ -603,7 +603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnsrl.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32( @@ -649,7 +649,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnsrl.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32( @@ -830,7 +830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16( @@ -876,7 +876,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vnsrl.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16( @@ -922,7 +922,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vnsrl.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16( @@ -1058,7 +1058,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vnsrl.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32( @@ -1104,7 +1104,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32( @@ -1150,7 +1150,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vnsrl.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32( @@ -1241,7 +1241,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vnsrl.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64( @@ -1287,7 +1287,7 @@ ; CHECK: # %bb.0: 
# %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vnsrl.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64( @@ -1333,7 +1333,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vnsrl.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64( @@ -1470,7 +1470,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16( @@ -1503,7 +1503,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16( @@ -1536,7 +1536,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16( @@ -1633,7 +1633,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32( @@ -1666,7 +1666,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32( @@ -1699,7 +1699,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32( @@ -1764,7 +1764,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64( @@ -1797,7 +1797,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64( @@ -1830,7 +1830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll @@ -146,7 +146,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8( @@ -192,7 +192,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnsrl.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8( @@ -238,7 +238,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnsrl.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8( @@ -374,7 +374,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnsrl.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16( @@ -420,7 +420,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16( @@ -466,7 +466,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnsrl.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16( @@ -557,7 +557,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnsrl.wv v11, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v11 +; CHECK-NEXT: vmv.v.v v8, v11 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32( @@ -603,7 +603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnsrl.wv v14, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v14 +; CHECK-NEXT: vmv.v.v v8, v14 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32( @@ -649,7 +649,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnsrl.wv v20, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v20 +; CHECK-NEXT: vmv.v.v v8, v20 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32( @@ -830,7 +830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16( @@ -876,7 +876,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vnsrl.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16( @@ -922,7 +922,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vnsrl.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16( @@ -1058,7 +1058,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vnsrl.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32( @@ -1104,7 +1104,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32( @@ -1150,7 +1150,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vnsrl.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32( @@ -1241,7 +1241,7 @@ ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vnsrl.wx v10, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64( @@ -1287,7 +1287,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vnsrl.wx v12, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64( @@ -1333,7 +1333,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vnsrl.wx v16, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64( @@ -1470,7 +1470,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16( @@ -1503,7 +1503,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16( @@ -1536,7 +1536,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16( @@ -1633,7 +1633,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32( @@ -1666,7 +1666,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32( @@ -1699,7 +1699,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32( @@ -1764,7 +1764,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v10, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64( @@ -1797,7 +1797,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v12, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64( @@ -1830,7 +1830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v16, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll @@ -217,14 +217,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; 
RV64-LABEL: vpgather_nxv8i8: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv8i8.nxv8p0i8( %ptrs, %m, i32 %evl) ret %v @@ -382,14 +382,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv4i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv4i16.nxv4p0i16( %ptrs, %m, i32 %evl) ret %v @@ -400,14 +400,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8 -; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_nxv4i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8 -; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -422,14 +422,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv8i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t -; RV64-NEXT: vmv2r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv8i16.nxv8p0i16( %ptrs, %m, i32 %evl) ret %v @@ -561,7 +561,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2i32.nxv2p0i32( %ptrs, %m, i32 %evl) ret %v @@ -622,7 +622,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv4i32.nxv4p0i32( %ptrs, %m, i32 %evl) ret %v @@ -639,7 +639,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8 -; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -660,7 +660,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv8i32.nxv8p0i32( %ptrs, %m, i32 %evl) ret %v @@ -837,7 +837,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv1i64: @@ -856,7 +856,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2i64: @@ -875,7 +875,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), 
v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv4i64: @@ -892,7 +892,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8 -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_nxv4i64: @@ -913,7 +913,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv8i64: @@ -1143,14 +1143,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv4f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv4f16.nxv4p0f16( %ptrs, %m, i32 %evl) ret %v @@ -1161,14 +1161,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8 -; RV32-NEXT: vmv1r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_nxv4f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8 -; RV64-NEXT: vmv1r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -1183,14 +1183,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv8f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t -; RV64-NEXT: vmv2r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv8f16.nxv8p0f16( %ptrs, %m, i32 %evl) ret %v @@ -1322,7 +1322,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t -; RV64-NEXT: vmv1r.v v8, v10 +; RV64-NEXT: vmv.v.v v8, v10 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv2f32.nxv2p0f32( %ptrs, %m, i32 %evl) ret %v @@ -1341,7 +1341,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t -; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv4f32.nxv4p0f32( %ptrs, %m, i32 %evl) ret %v @@ -1358,7 +1358,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; RV64-NEXT: vluxei64.v v12, (zero), v8 -; RV64-NEXT: vmv2r.v v8, v12 +; RV64-NEXT: vmv.v.v v8, v12 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -1379,7 +1379,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t -; RV64-NEXT: vmv4r.v v8, v16 +; RV64-NEXT: vmv.v.v v8, v16 ; RV64-NEXT: ret %v = call @llvm.vp.gather.nxv8f32.nxv8p0f32( %ptrs, %m, i32 %evl) ret %v @@ -1556,7 +1556,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t -; RV32-NEXT: vmv1r.v v8, v9 +; RV32-NEXT: vmv.v.v v8, v9 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv1f64: @@ -1575,7 +1575,7 @@ ; RV32: 
# %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t -; RV32-NEXT: vmv2r.v v8, v10 +; RV32-NEXT: vmv.v.v v8, v10 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv2f64: @@ -1594,7 +1594,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv4f64: @@ -1611,7 +1611,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; RV32-NEXT: vluxei32.v v12, (zero), v8 -; RV32-NEXT: vmv4r.v v8, v12 +; RV32-NEXT: vmv.v.v v8, v12 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_truemask_nxv4f64: @@ -1632,7 +1632,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t -; RV32-NEXT: vmv8r.v v8, v16 +; RV32-NEXT: vmv.v.v v8, v16 ; RV32-NEXT: ret ; ; RV64-LABEL: vpgather_nxv8f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll @@ -149,7 +149,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrgather.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i8.i32( @@ -195,7 +195,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrgather.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16i8.i32( @@ -241,7 +241,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vrgather.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv32i8.i32( @@ -287,7 +287,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv64i8.i32( @@ -426,7 +426,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4i16.i32( @@ -472,7 +472,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i16.i32( @@ -518,7 +518,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16i16.i32( @@ -564,7 +564,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv32i16.i32( @@ -657,7 +657,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vrgather.vv.nxv2i32.i32( @@ -703,7 +703,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4i32.i32( @@ -749,7 +749,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i32.i32( @@ -795,7 +795,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16i32.i32( @@ -934,7 +934,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4f16.i32( @@ -980,7 +980,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8f16.i32( @@ -1026,7 +1026,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16f16.i32( @@ -1072,7 +1072,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv32f16.i32( @@ -1165,7 +1165,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2f32.i32( @@ -1211,7 +1211,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4f32.i32( @@ -1257,7 +1257,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8f32.i32( @@ -1303,7 +1303,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16f32.i32( @@ -1350,7 +1350,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrgather.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv1f64.i32( @@ -1396,7 +1396,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrgather.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2f64.i32( @@ -1442,7 
+1442,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgather.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4f64.i32( @@ -1488,7 +1488,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8f64.i32( @@ -1673,7 +1673,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vrgather.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i8.i32( @@ -1719,7 +1719,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vrgather.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i8.i32( @@ -1765,7 +1765,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vrgather.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i8.i32( @@ -1811,7 +1811,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv64i8.i32( @@ -1949,7 +1949,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i16.i32( @@ -1995,7 +1995,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i16.i32( @@ -2041,7 +2041,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i16.i32( @@ -2087,7 +2087,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i16.i32( @@ -2179,7 +2179,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i32.i32( @@ -2225,7 +2225,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i32.i32( @@ -2271,7 +2271,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i32.i32( @@ -2317,7 +2317,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a1, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i32.i32( @@ -2455,7 +2455,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f16.i32( @@ -2501,7 +2501,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f16.i32( @@ -2547,7 +2547,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f16.i32( @@ -2593,7 +2593,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32f16.i32( @@ -2685,7 +2685,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f32.i32( @@ -2731,7 +2731,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f32.i32( @@ -2777,7 +2777,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f32.i32( @@ -2823,7 +2823,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f32.i32( @@ -2869,7 +2869,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vrgather.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f64.i32( @@ -2915,7 +2915,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vrgather.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f64.i32( @@ -2961,7 +2961,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vrgather.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f64.i32( @@ -3007,7 +3007,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f64.i32( @@ -3147,7 +3147,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrgather.vi v9, v8, 9 -; 
CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i8.i32( @@ -3180,7 +3180,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrgather.vi v10, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i8.i32( @@ -3213,7 +3213,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vrgather.vi v12, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i8.i32( @@ -3246,7 +3246,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv64i8.i32( @@ -3345,7 +3345,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vi v9, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i16.i32( @@ -3378,7 +3378,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vi v10, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i16.i32( @@ -3411,7 +3411,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vi v12, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i16.i32( @@ -3444,7 +3444,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i16.i32( @@ -3510,7 +3510,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vi v9, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i32.i32( @@ -3543,7 +3543,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vi v10, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i32.i32( @@ -3576,7 +3576,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vi v12, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i32.i32( @@ -3609,7 +3609,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i32.i32( @@ -3708,7 +3708,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vi v9, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f16.i32( @@ -3741,7 +3741,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vi v10, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret 
entry: %a = call @llvm.riscv.vrgather.vx.nxv8f16.i32( @@ -3774,7 +3774,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vi v12, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f16.i32( @@ -3807,7 +3807,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32f16.i32( @@ -3873,7 +3873,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vi v9, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f32.i32( @@ -3906,7 +3906,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vi v10, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f32.i32( @@ -3939,7 +3939,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vi v12, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f32.i32( @@ -3972,7 +3972,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f32.i32( @@ -4005,7 +4005,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrgather.vi v9, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f64.i32( @@ -4038,7 +4038,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrgather.vi v10, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f64.i32( @@ -4071,7 +4071,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgather.vi v12, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f64.i32( @@ -4104,7 +4104,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f64.i32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll @@ -149,7 +149,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrgather.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i8.i64( @@ -195,7 +195,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrgather.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16i8.i64( @@ -241,7 +241,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, 
m4, ta, mu ; CHECK-NEXT: vrgather.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv32i8.i64( @@ -287,7 +287,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv64i8.i64( @@ -426,7 +426,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4i16.i64( @@ -472,7 +472,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i16.i64( @@ -518,7 +518,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16i16.i64( @@ -564,7 +564,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv32i16.i64( @@ -657,7 +657,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2i32.i64( @@ -703,7 +703,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4i32.i64( @@ -749,7 +749,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i32.i64( @@ -795,7 +795,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16i32.i64( @@ -842,7 +842,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrgather.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv1i64.i64( @@ -888,7 +888,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrgather.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2i64.i64( @@ -934,7 +934,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgather.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4i64.i64( @@ -980,7 +980,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: 
vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i64.i64( @@ -1119,7 +1119,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4f16.i64( @@ -1165,7 +1165,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8f16.i64( @@ -1211,7 +1211,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16f16.i64( @@ -1257,7 +1257,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv32f16.i64( @@ -1350,7 +1350,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2f32.i64( @@ -1396,7 +1396,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4f32.i64( @@ -1442,7 +1442,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8f32.i64( @@ -1488,7 +1488,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16f32.i64( @@ -1535,7 +1535,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrgather.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv1f64.i64( @@ -1581,7 +1581,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrgather.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2f64.i64( @@ -1627,7 +1627,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgather.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4f64.i64( @@ -1673,7 +1673,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8f64.i64( @@ -1858,7 +1858,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vrgather.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i8.i64( @@ -1904,7 +1904,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vrgather.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i8.i64( @@ -1950,7 +1950,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vrgather.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i8.i64( @@ -1996,7 +1996,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv64i8.i64( @@ -2134,7 +2134,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i16.i64( @@ -2180,7 +2180,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i16.i64( @@ -2226,7 +2226,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i16.i64( @@ -2272,7 +2272,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i16.i64( @@ -2364,7 +2364,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i32.i64( @@ -2410,7 +2410,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i32.i64( @@ -2456,7 +2456,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i32.i64( @@ -2502,7 +2502,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i32.i64( @@ -2548,7 +2548,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vrgather.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1i64.i64( @@ -2594,7 +2594,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vrgather.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vrgather.vx.nxv2i64.i64( @@ -2640,7 +2640,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vrgather.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i64.i64( @@ -2686,7 +2686,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i64.i64( @@ -2824,7 +2824,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f16.i64( @@ -2870,7 +2870,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f16.i64( @@ -2916,7 +2916,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f16.i64( @@ -2962,7 +2962,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32f16.i64( @@ -3054,7 +3054,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f32.i64( @@ -3100,7 +3100,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f32.i64( @@ -3146,7 +3146,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f32.i64( @@ -3192,7 +3192,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f32.i64( @@ -3238,7 +3238,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vrgather.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f64.i64( @@ -3284,7 +3284,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vrgather.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f64.i64( @@ -3330,7 +3330,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vrgather.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f64.i64( @@ -3376,7 +3376,7 @@ ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f64.i64( @@ -3516,7 +3516,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrgather.vi v9, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i8.i64( @@ -3549,7 +3549,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrgather.vi v10, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i8.i64( @@ -3582,7 +3582,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vrgather.vi v12, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i8.i64( @@ -3615,7 +3615,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv64i8.i64( @@ -3714,7 +3714,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vi v9, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i16.i64( @@ -3747,7 +3747,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vi v10, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i16.i64( @@ -3780,7 +3780,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vi v12, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i16.i64( @@ -3813,7 +3813,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i16.i64( @@ -3879,7 +3879,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vi v9, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i32.i64( @@ -3912,7 +3912,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vi v10, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i32.i64( @@ -3945,7 +3945,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vi v12, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i32.i64( @@ -3978,7 +3978,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i32.i64( @@ -4011,7 +4011,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; 
CHECK-NEXT: vrgather.vi v9, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1i64.i64( @@ -4044,7 +4044,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrgather.vi v10, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i64.i64( @@ -4077,7 +4077,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgather.vi v12, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i64.i64( @@ -4110,7 +4110,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i64.i64( @@ -4209,7 +4209,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgather.vi v9, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f16.i64( @@ -4242,7 +4242,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgather.vi v10, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f16.i64( @@ -4275,7 +4275,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgather.vi v12, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f16.i64( @@ -4308,7 +4308,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32f16.i64( @@ -4374,7 +4374,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vrgather.vi v9, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f32.i64( @@ -4407,7 +4407,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgather.vi v10, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f32.i64( @@ -4440,7 +4440,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgather.vi v12, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f32.i64( @@ -4473,7 +4473,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f32.i64( @@ -4506,7 +4506,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vrgather.vi v9, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f64.i64( @@ -4539,7 +4539,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vrgather.vi v10, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v10 +; 
CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f64.i64( @@ -4572,7 +4572,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgather.vi v12, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f64.i64( @@ -4605,7 +4605,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll @@ -149,7 +149,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrgatherei16.vv v9, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8i8( @@ -195,7 +195,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v10, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16i8( @@ -241,7 +241,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v12, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv32i8( @@ -379,7 +379,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4i16( @@ -425,7 +425,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8i16( @@ -471,7 +471,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16i16( @@ -517,7 +517,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv32i16( @@ -610,7 +610,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4i32( @@ -656,7 +656,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8i32( @@ -702,7 +702,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vrgatherei16.vv.nxv16i32( @@ -749,7 +749,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4i64( @@ -795,7 +795,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8i64( @@ -934,7 +934,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4f16( @@ -980,7 +980,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8f16( @@ -1026,7 +1026,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16f16( @@ -1072,7 +1072,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv32f16( @@ -1165,7 +1165,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4f32( @@ -1211,7 +1211,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8f32( @@ -1257,7 +1257,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16f32( @@ -1304,7 +1304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4f64( @@ -1350,7 +1350,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll @@ -149,7 +149,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu ; CHECK-NEXT: vrgatherei16.vv v9, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8i8( @@ -195,7 +195,7 
@@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v10, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16i8( @@ -241,7 +241,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v12, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv32i8( @@ -379,7 +379,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4i16( @@ -425,7 +425,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8i16( @@ -471,7 +471,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16i16( @@ -517,7 +517,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv32i16( @@ -610,7 +610,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4i32( @@ -656,7 +656,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8i32( @@ -702,7 +702,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16i32( @@ -749,7 +749,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4i64( @@ -795,7 +795,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8i64( @@ -934,7 +934,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vrgatherei16.vv v10, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4f16( @@ -980,7 +980,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8f16( @@ -1026,7 +1026,7 @@ ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16f16( @@ -1072,7 +1072,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv32f16( @@ -1165,7 +1165,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vrgatherei16.vv v12, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4f32( @@ -1211,7 +1211,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8f32( @@ -1257,7 +1257,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16f32( @@ -1304,7 +1304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vrgatherei16.vv v16, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4f64( @@ -1350,7 +1350,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll @@ -10,7 +10,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf8 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i64.nxv1i8( @@ -52,7 +52,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf8 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i64.nxv2i8( @@ -94,7 +94,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf8 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i64.nxv4i8( @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf8 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i64.nxv8i8( @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf4 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i64.nxv1i16( @@ -220,7 +220,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf4 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i64.nxv2i16( @@ -262,7 +262,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf4 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i64.nxv4i16( @@ -304,7 +304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i64.nxv8i16( @@ -388,7 +388,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsext.vf4 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i32.nxv2i8( @@ -430,7 +430,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsext.vf4 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i32.nxv4i8( @@ -472,7 +472,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsext.vf4 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i32.nxv8i8( @@ -514,7 +514,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv16i32.nxv16i8( @@ -598,7 +598,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i32.nxv2i16( @@ -640,7 +640,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsext.vf2 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i32.nxv4i16( @@ -682,7 +682,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsext.vf2 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i32.nxv8i16( @@ -724,7 +724,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv16i32.nxv16i16( @@ -850,7 +850,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i16.nxv4i8( @@ -892,7 +892,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsext.vf2 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i16.nxv8i8( @@ -934,7 +934,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsext.vf2 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv16i16.nxv16i8( @@ -976,7 +976,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsext.vf2 v16, v8 -; 
CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv32i16.nxv32i8( diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll @@ -10,7 +10,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf8 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i64.nxv1i8( @@ -52,7 +52,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf8 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i64.nxv2i8( @@ -94,7 +94,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf8 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i64.nxv4i8( @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf8 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i64.nxv8i8( @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf4 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i64.nxv1i16( @@ -220,7 +220,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf4 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i64.nxv2i16( @@ -262,7 +262,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf4 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i64.nxv4i16( @@ -304,7 +304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i64.nxv8i16( @@ -388,7 +388,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsext.vf4 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i32.nxv2i8( @@ -430,7 +430,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsext.vf4 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i32.nxv4i8( @@ -472,7 +472,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsext.vf4 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i32.nxv8i8( @@ -514,7 +514,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv16i32.nxv16i8( @@ -556,7 +556,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v9, v8 -; CHECK-NEXT: 
vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i64.nxv1i32( @@ -598,7 +598,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vsext.vf2 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i64.nxv2i32( @@ -640,7 +640,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vsext.vf2 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i64.nxv4i32( @@ -682,7 +682,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i64.nxv8i32( @@ -766,7 +766,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i32.nxv2i16( @@ -808,7 +808,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu ; CHECK-NEXT: vsext.vf2 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i32.nxv4i16( @@ -850,7 +850,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vsext.vf2 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i32.nxv8i16( @@ -892,7 +892,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu ; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv16i32.nxv16i16( @@ -1018,7 +1018,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu ; CHECK-NEXT: vsext.vf2 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i16.nxv4i8( @@ -1060,7 +1060,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu ; CHECK-NEXT: vsext.vf2 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i16.nxv8i8( @@ -1102,7 +1102,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu ; CHECK-NEXT: vsext.vf2 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv16i16.nxv16i8( @@ -1144,7 +1144,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu ; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv32i16.nxv32i8( diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll @@ -149,7 +149,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vslide1up.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv8i8.i8( @@ -195,7 +195,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: 
vslide1up.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv16i8.i8( @@ -241,7 +241,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vslide1up.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv32i8.i8( @@ -287,7 +287,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vslide1up.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv64i8.i8( @@ -425,7 +425,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslide1up.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv4i16.i16( @@ -471,7 +471,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vslide1up.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv8i16.i16( @@ -517,7 +517,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslide1up.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv16i16.i16( @@ -563,7 +563,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vslide1up.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv32i16.i16( @@ -655,7 +655,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vslide1up.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv2i32.i32( @@ -701,7 +701,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vslide1up.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv4i32.i32( @@ -747,7 +747,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vslide1up.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv8i32.i32( @@ -793,7 +793,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vslide1up.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv16i32.i32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll @@ -149,7 +149,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu ; CHECK-NEXT: vslide1up.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv8i8.i8( @@ -195,7 +195,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu ; CHECK-NEXT: vslide1up.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv16i8.i8( @@ -241,7 
+241,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu ; CHECK-NEXT: vslide1up.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv32i8.i8( @@ -287,7 +287,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu ; CHECK-NEXT: vslide1up.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv64i8.i8( @@ -425,7 +425,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu ; CHECK-NEXT: vslide1up.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv4i16.i16( @@ -471,7 +471,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu ; CHECK-NEXT: vslide1up.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv8i16.i16( @@ -517,7 +517,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu ; CHECK-NEXT: vslide1up.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv16i16.i16( @@ -563,7 +563,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu ; CHECK-NEXT: vslide1up.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv32i16.i16( @@ -655,7 +655,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vslide1up.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv2i32.i32( @@ -701,7 +701,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu ; CHECK-NEXT: vslide1up.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv4i32.i32( @@ -747,7 +747,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu ; CHECK-NEXT: vslide1up.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv8i32.i32( @@ -793,7 +793,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu ; CHECK-NEXT: vslide1up.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv16i32.i32( @@ -839,7 +839,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu ; CHECK-NEXT: vslide1up.vx v9, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv1i64.i64( @@ -885,7 +885,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu ; CHECK-NEXT: vslide1up.vx v10, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv2i64.i64( @@ -931,7 +931,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu ; CHECK-NEXT: vslide1up.vx v12, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv4i64.i64( @@ -977,7 +977,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu ; CHECK-NEXT: 
vslide1up.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv8i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll @@ -37,7 +37,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v10, v8, 0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -48,7 +48,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v12, v8, 0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -115,7 +115,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v10, v8, 0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -138,7 +138,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v12, v8, 0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -161,7 +161,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v16, v8, 0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -234,7 +234,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v10, v8, 0 -; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -271,7 +271,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v12, v8, 0 -; CHECK-NEXT: vmv2r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec @@ -308,7 +308,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v16, v8, 0 -; CHECK-NEXT: vmv4r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %tvec = trunc %va to ret %tvec diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll @@ -10,7 +10,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf8 v9, v8 -; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: vmv.v.v v8, v9 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv1i64.nxv1i8( @@ -52,7 +52,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu ; CHECK-NEXT: vzext.vf8 v10, v8 -; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: vmv.v.v v8, v10 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv2i64.nxv2i8( @@ -94,7 +94,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu ; CHECK-NEXT: vzext.vf8 v12, v8 -; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: vmv.v.v v8, v12 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv4i64.nxv4i8( @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu ; CHECK-NEXT: vzext.vf8 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vzext.nxv8i64.nxv8i8( @@ -178,7 +178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vzext.vf4 v9, v8 
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vmv.v.v v8, v9
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv1i64.nxv1i16(
@@ -220,7 +220,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv2i64.nxv2i16(
@@ -262,7 +262,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: vmv.v.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i64.nxv4i16(
@@ -304,7 +304,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
 ; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i64.nxv8i16(
@@ -388,7 +388,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT: vzext.vf4 v9, v8
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vmv.v.v v8, v9
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv2i32.nxv2i8(
@@ -430,7 +430,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i32.nxv4i8(
@@ -472,7 +472,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: vmv.v.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i32.nxv8i8(
@@ -514,7 +514,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
 ; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv16i32.nxv16i8(
@@ -556,7 +556,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT: vzext.vf2 v9, v8
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vmv.v.v v8, v9
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv1i64.nxv1i32(
@@ -598,7 +598,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv2i64.nxv2i32(
@@ -640,7 +640,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT: vzext.vf2 v12, v8
-; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: vmv.v.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i64.nxv4i32(
@@ -682,7 +682,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
 ; CHECK-NEXT: vzext.vf2 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i64.nxv8i32(
@@ -766,7 +766,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT: vzext.vf2 v9, v8
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vmv.v.v v8, v9
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv2i32.nxv2i16(
@@ -808,7 +808,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i32.nxv4i16(
@@ -850,7 +850,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT: vzext.vf2 v12, v8
-; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: vmv.v.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i32.nxv8i16(
@@ -892,7 +892,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
 ; CHECK-NEXT: vzext.vf2 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv16i32.nxv16i16(
@@ -1018,7 +1018,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT: vzext.vf2 v9, v8
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vmv.v.v v8, v9
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i16.nxv4i8(
@@ -1060,7 +1060,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i16.nxv8i8(
@@ -1102,7 +1102,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT: vzext.vf2 v12, v8
-; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: vmv.v.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv16i16.nxv16i8(
@@ -1144,7 +1144,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
 ; CHECK-NEXT: vzext.vf2 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv32i16.nxv32i8(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll
@@ -10,7 +10,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT: vzext.vf8 v9, v8
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vmv.v.v v8, v9
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv1i64.nxv1i8(
@@ -52,7 +52,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT: vzext.vf8 v10, v8
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv2i64.nxv2i8(
@@ -94,7 +94,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT: vzext.vf8 v12, v8
-; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: vmv.v.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i64.nxv4i8(
@@ -136,7 +136,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
 ; CHECK-NEXT: vzext.vf8 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i64.nxv8i8(
@@ -178,7 +178,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT: vzext.vf4 v9, v8
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vmv.v.v v8, v9
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv1i64.nxv1i16(
@@ -220,7 +220,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv2i64.nxv2i16(
@@ -262,7 +262,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: vmv.v.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i64.nxv4i16(
@@ -304,7 +304,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
 ; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i64.nxv8i16(
@@ -388,7 +388,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT: vzext.vf4 v9, v8
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vmv.v.v v8, v9
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv2i32.nxv2i8(
@@ -430,7 +430,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT: vzext.vf4 v10, v8
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i32.nxv4i8(
@@ -472,7 +472,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT: vzext.vf4 v12, v8
-; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: vmv.v.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i32.nxv8i8(
@@ -514,7 +514,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
 ; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv16i32.nxv16i8(
@@ -556,7 +556,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT: vzext.vf2 v9, v8
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vmv.v.v v8, v9
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv1i64.nxv1i32(
@@ -598,7 +598,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv2i64.nxv2i32(
@@ -640,7 +640,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT: vzext.vf2 v12, v8
-; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: vmv.v.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i64.nxv4i32(
@@ -682,7 +682,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
 ; CHECK-NEXT: vzext.vf2 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i64.nxv8i32(
@@ -766,7 +766,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT: vzext.vf2 v9, v8
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vmv.v.v v8, v9
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv2i32.nxv2i16(
@@ -808,7 +808,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i32.nxv4i16(
@@ -850,7 +850,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT: vzext.vf2 v12, v8
-; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: vmv.v.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i32.nxv8i16(
@@ -892,7 +892,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
 ; CHECK-NEXT: vzext.vf2 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv16i32.nxv16i16(
@@ -1018,7 +1018,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT: vzext.vf2 v9, v8
-; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: vmv.v.v v8, v9
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i16.nxv4i8(
@@ -1060,7 +1060,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i16.nxv8i8(
@@ -1102,7 +1102,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT: vzext.vf2 v12, v8
-; CHECK-NEXT: vmv4r.v v8, v12
+; CHECK-NEXT: vmv.v.v v8, v12
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv16i16.nxv16i8(
@@ -1144,7 +1144,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
 ; CHECK-NEXT: vzext.vf2 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv32i16.nxv32i8(
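
For reference, one complete test case of the kind updated above is sketched below as a standalone .ll file. The RUN line and the two-operand (source vector, VL) intrinsic signature are assumptions inferred from the hunks rather than copied from the patch; the CHECK lines reflect the post-patch output, where the whole-register vmv1r.v has become vmv.v.v.

; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s

; Assumed unmasked (source, vl) form of the zero-extension intrinsic.
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
  <vscale x 1 x i8>,
  i64)

define <vscale x 1 x i64> @intrinsic_vzext_vf8_nxv1i64(<vscale x 1 x i8> %0, i64 %1) nounwind {
; The widening vzext.vf8 writes v9; copying the result back into the ABI
; return register v8 now uses vmv.v.v instead of the whole-register vmv1r.v.
; CHECK-LABEL: intrinsic_vzext_vf8_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vzext.vf8 v9, v8
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
    <vscale x 1 x i8> %0,
    i64 %1)
  ret <vscale x 1 x i64> %a
}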