diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp @@ -35,6 +35,10 @@ #define GET_INSTRINFO_CTOR_DTOR #include "RISCVGenInstrInfo.inc" +static cl::opt<bool> PreferWholeRegisterMove( + "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, + cl::desc("Prefer whole register move for vector registers.")); + namespace llvm { namespace RISCVVPseudosTable { @@ -117,6 +121,116 @@ return ((DstReg - SrcReg) & 0x1f) < NumRegs; } +static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, + const MachineBasicBlock &MBB, + MachineBasicBlock::const_iterator MBBI, + MachineBasicBlock::const_iterator &DefMBBI, + RISCVII::VLMUL &LMul) { + if (PreferWholeRegisterMove) + return false; + + assert(MBBI->getOpcode() == TargetOpcode::COPY && + "Unexpected COPY instruction."); + Register SrcReg = MBBI->getOperand(1).getReg(); + const TargetRegisterInfo *TRI = STI.getRegisterInfo(); + + bool FoundDef = false; + bool FoundVSetVLIForCopy = false; + MachineBasicBlock::const_iterator VSetVLIForCopy; + Register CopyAVLReg = RISCV::NoRegister; + while (MBBI != MBB.begin()) { + --MBBI; + if (MBBI->isMetaInstruction()) + continue; + + if (MBBI->getOpcode() == RISCV::PseudoVSETVLI || + MBBI->getOpcode() == RISCV::PseudoVSETIVLI) { + // There is a vsetvli between the COPY and the source-defining instruction. + // vy = def_vop ... + // ... + // vsetvli + // ... + // vx = COPY vy + if (!FoundDef) { + if (!FoundVSetVLIForCopy) { + FoundVSetVLIForCopy = true; + VSetVLIForCopy = MBBI; + if (MBBI->getOperand(1).isReg()) + CopyAVLReg = MBBI->getOperand(1).getReg(); + } + continue; + } + + // The first vsetvli after the def_vop instruction. + unsigned VType = MBBI->getOperand(2).getImm(); + // If the vsetvli is tail undisturbed, keep the whole register move. + if (!RISCVVType::isTailAgnostic(VType)) + return false; + RISCVII::VLMUL SetLMul = RISCVVType::getVLMUL(VType); + unsigned SetSEW = RISCVVType::getSEW(VType); + MachineOperand SetAVL = MBBI->getOperand(1); + if (FoundVSetVLIForCopy) { + // vsetvli (current MBBI) + // ... + // vy = def_vop ... + // ... + // vsetvli (VSetVLIForCopy) + // ... + // vx = COPY vy + unsigned CopyVType = VSetVLIForCopy->getOperand(2).getImm(); + RISCVII::VLMUL CopyLMul = RISCVVType::getVLMUL(CopyVType); + unsigned CopySEW = RISCVVType::getSEW(CopyVType); + MachineOperand CopyAVL = VSetVLIForCopy->getOperand(1); + if (LMul == SetLMul && CopyLMul == SetLMul && CopySEW == SetSEW && + ((CopyAVL.isReg() && SetAVL.isReg() && + CopyAVL.getReg() == SetAVL.getReg()) || + (CopyAVL.isImm() && SetAVL.isImm() && + CopyAVL.getImm() == SetAVL.getImm()))) + return true; + else + return false; + } else { + // vsetvli + // ... + // vy = def_vop ... + // ... + // vx = COPY vy + // + // The checking is conservative. We only have register classes for + // LMUL = 1/2/4/8. We should be able to convert vmv1r.v to vmv.v.v + // for fractional LMUL operations. However, we cannot use SetLMul for + // widening operations, since the result of a widening operation is + // 2 x LMUL. + if (LMul == SetLMul) + return true; + else + return false; + } + } else if (MBBI->isInlineAsm() || MBBI->isCall()) { + assert(!FoundDef && "There must be no inline asm or calls between " + "the source-defining instruction and the vsetvli."); + return false; + } else if (MBBI->getNumDefs()) { + for (const MachineOperand &Op : MBBI->defs()) { + if (FoundVSetVLIForCopy) { + // The AVL register of the COPY is redefined.
+ // We cannot tell whether the AVL of the COPY and of the def_vop are the same. + if (TRI->isSubRegisterEq(Op.getReg(), CopyAVLReg)) + return false; + } + if (!FoundDef && TRI->isSubRegisterEq(Op.getReg(), SrcReg)) { + // Found the definition. + FoundDef = true; + DefMBBI = MBBI; + break; + } + } + } + } + + return false; +} + void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, @@ -132,7 +246,7 @@ unsigned Opc; bool IsScalableVector = true; unsigned NF = 1; - unsigned LMul = 1; + RISCVII::VLMUL LMul = RISCVII::LMUL_1; unsigned SubRegIdx = RISCV::sub_vrm1_0; if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) { Opc = RISCV::FSGNJ_H; @@ -145,91 +259,161 @@ IsScalableVector = false; } else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) { Opc = RISCV::PseudoVMV1R_V; + LMul = RISCVII::LMUL_1; } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) { Opc = RISCV::PseudoVMV2R_V; + LMul = RISCVII::LMUL_2; } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) { Opc = RISCV::PseudoVMV4R_V; + LMul = RISCVII::LMUL_4; } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) { Opc = RISCV::PseudoVMV8R_V; + LMul = RISCVII::LMUL_8; } else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) { Opc = RISCV::PseudoVMV1R_V; SubRegIdx = RISCV::sub_vrm1_0; NF = 2; - LMul = 1; + LMul = RISCVII::LMUL_1; } else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) { Opc = RISCV::PseudoVMV2R_V; SubRegIdx = RISCV::sub_vrm2_0; NF = 2; - LMul = 2; + LMul = RISCVII::LMUL_2; } else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) { Opc = RISCV::PseudoVMV4R_V; SubRegIdx = RISCV::sub_vrm4_0; NF = 2; - LMul = 4; + LMul = RISCVII::LMUL_4; } else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) { Opc = RISCV::PseudoVMV1R_V; SubRegIdx = RISCV::sub_vrm1_0; NF = 3; - LMul = 1; + LMul = RISCVII::LMUL_1; } else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) { Opc = RISCV::PseudoVMV2R_V; SubRegIdx = RISCV::sub_vrm2_0; NF = 3; - LMul = 2; + LMul = RISCVII::LMUL_2; } else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) { Opc = RISCV::PseudoVMV1R_V; SubRegIdx = RISCV::sub_vrm1_0; NF = 4; - LMul = 1; + LMul = RISCVII::LMUL_1; } else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) { Opc = RISCV::PseudoVMV2R_V; SubRegIdx = RISCV::sub_vrm2_0; NF = 4; - LMul = 2; + LMul = RISCVII::LMUL_2; } else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) { Opc = RISCV::PseudoVMV1R_V; SubRegIdx = RISCV::sub_vrm1_0; NF = 5; - LMul = 1; + LMul = RISCVII::LMUL_1; } else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) { Opc = RISCV::PseudoVMV1R_V; SubRegIdx = RISCV::sub_vrm1_0; NF = 6; - LMul = 1; + LMul = RISCVII::LMUL_1; } else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) { Opc = RISCV::PseudoVMV1R_V; SubRegIdx = RISCV::sub_vrm1_0; NF = 7; - LMul = 1; + LMul = RISCVII::LMUL_1; } else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) { Opc = RISCV::PseudoVMV1R_V; SubRegIdx = RISCV::sub_vrm1_0; NF = 8; - LMul = 1; + LMul = RISCVII::LMUL_1; } else { llvm_unreachable("Impossible reg-to-reg copy"); } if (IsScalableVector) { + bool UseVMV_V_V = false; + MachineBasicBlock::const_iterator DefMBBI; + unsigned DefExplicitOpNum; + unsigned VIOpc; + if (isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) { + UseVMV_V_V = true; + DefExplicitOpNum = DefMBBI->getNumExplicitOperands(); + switch (LMul) { + default: + llvm_unreachable("Impossible LMUL for vector register copy."); + case RISCVII::LMUL_F8: + Opc = RISCV::PseudoVMV_V_V_MF8; + VIOpc = 
RISCV::PseudoVMV_V_I_MF8; + break; + case RISCVII::LMUL_F4: + Opc = RISCV::PseudoVMV_V_V_MF4; + VIOpc = RISCV::PseudoVMV_V_I_MF4; + break; + case RISCVII::LMUL_F2: + Opc = RISCV::PseudoVMV_V_V_MF2; + VIOpc = RISCV::PseudoVMV_V_I_MF2; + break; + case RISCVII::LMUL_1: + Opc = RISCV::PseudoVMV_V_V_M1; + VIOpc = RISCV::PseudoVMV_V_I_M1; + break; + case RISCVII::LMUL_2: + Opc = RISCV::PseudoVMV_V_V_M2; + VIOpc = RISCV::PseudoVMV_V_I_M2; + break; + case RISCVII::LMUL_4: + Opc = RISCV::PseudoVMV_V_V_M4; + VIOpc = RISCV::PseudoVMV_V_I_M4; + break; + case RISCVII::LMUL_8: + Opc = RISCV::PseudoVMV_V_V_M8; + VIOpc = RISCV::PseudoVMV_V_I_M8; + break; + } + } + + bool UseVMV_V_I = false; + if (UseVMV_V_V && (DefMBBI->getOpcode() == VIOpc)) { + UseVMV_V_I = true; + Opc = VIOpc; + } + if (NF == 1) { - BuildMI(MBB, MBBI, DL, get(Opc), DstReg) - .addReg(SrcReg, getKillRegState(KillSrc)); + auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), DstReg); + if (UseVMV_V_I) + MIB = MIB.add(DefMBBI->getOperand(1)); + else + MIB = MIB.addReg(SrcReg, getKillRegState(KillSrc)); + if (UseVMV_V_V) { + // The last two arguments of vector instructions are + // AVL, SEW. + MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 2)); // AVL + MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 1)); // SEW + } } else { const TargetRegisterInfo *TRI = STI.getRegisterInfo(); int I = 0, End = NF, Incr = 1; unsigned SrcEncoding = TRI->getEncodingValue(SrcReg); unsigned DstEncoding = TRI->getEncodingValue(DstReg); - if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMul)) { + unsigned LMulBits = static_cast<unsigned>(LMul); + unsigned LMulVal = 1 << (LMulBits < 4 ? LMulBits : 0); + if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMulVal)) { I = NF - 1; End = -1; Incr = -1; } for (; I != End; I += Incr) { - BuildMI(MBB, MBBI, DL, get(Opc), TRI->getSubReg(DstReg, SubRegIdx + I)) - .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I), - getKillRegState(KillSrc)); + auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), + TRI->getSubReg(DstReg, SubRegIdx + I)); + if (UseVMV_V_I) + MIB = MIB.add(DefMBBI->getOperand(1)); + else + MIB = MIB.addReg(TRI->getSubReg(SrcReg, SubRegIdx + I), + getKillRegState(KillSrc)); + if (UseVMV_V_V) { + MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 2)); // AVL + MIB.add(DefMBBI->getOperand(DefExplicitOpNum - 1)); // SEW + } } } } else { diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll @@ -563,7 +563,7 @@ ; RV32-NEXT: addi a0, a0, 16 ; RV32-NEXT: vs8r.v v8, (a0) ; RV32-NEXT: mv a0, zero -; RV32-NEXT: vmv8r.v v16, v8 +; RV32-NEXT: vmv.v.i v16, 0 ; RV32-NEXT: call vector_arg_indirect_stack@plt ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 5 @@ -615,7 +615,7 @@ ; RV64-NEXT: addi a0, a0, 24 ; RV64-NEXT: vs8r.v v8, (a0) ; RV64-NEXT: mv a0, zero -; RV64-NEXT: vmv8r.v v16, v8 +; RV64-NEXT: vmv.v.i v16, 0 ; RV64-NEXT: call vector_arg_indirect_stack@plt ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 5 diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll @@ -52,7 +52,7 @@ ; RV32-NEXT: vsetvli a0, zero, e32,m8,ta,mu ; RV32-NEXT: vmv.v.i v8, 0 ; RV32-NEXT: addi a0, sp, 32 -; RV32-NEXT: vmv8r.v v16, v8 +; RV32-NEXT: vmv.v.i v16, 0 ; RV32-NEXT: call callee_scalable_vector_split_indirect@plt ; 
RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 @@ -80,7 +80,7 @@ ; RV64-NEXT: vsetvli a0, zero, e32,m8,ta,mu ; RV64-NEXT: vmv.v.i v8, 0 ; RV64-NEXT: addi a0, sp, 24 -; RV64-NEXT: vmv8r.v v16, v8 +; RV64-NEXT: vmv.v.i v16, 0 ; RV64-NEXT: call callee_scalable_vector_split_indirect@plt ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 4 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll @@ -298,7 +298,7 @@ ; LMULMAX8-NEXT: mv a0, sp ; LMULMAX8-NEXT: addi a2, zero, 42 ; LMULMAX8-NEXT: vse32.v v8, (sp) -; LMULMAX8-NEXT: vmv8r.v v8, v24 +; LMULMAX8-NEXT: vmv.v.v v8, v24 ; LMULMAX8-NEXT: call ext3@plt ; LMULMAX8-NEXT: addi sp, s0, -256 ; LMULMAX8-NEXT: ld s0, 240(sp) # 8-byte Folded Reload @@ -326,8 +326,8 @@ ; LMULMAX4-NEXT: mv a0, sp ; LMULMAX4-NEXT: addi a3, zero, 42 ; LMULMAX4-NEXT: vse32.v v8, (sp) -; LMULMAX4-NEXT: vmv4r.v v8, v28 -; LMULMAX4-NEXT: vmv4r.v v12, v24 +; LMULMAX4-NEXT: vmv.v.v v8, v28 +; LMULMAX4-NEXT: vmv.v.v v12, v24 ; LMULMAX4-NEXT: call ext3@plt ; LMULMAX4-NEXT: addi sp, s0, -256 ; LMULMAX4-NEXT: ld s0, 240(sp) # 8-byte Folded Reload @@ -390,7 +390,7 @@ ; LMULMAX8-NEXT: addi t3, zero, 8 ; LMULMAX8-NEXT: vse32.v v8, (sp) ; LMULMAX8-NEXT: mv a0, zero -; LMULMAX8-NEXT: vmv8r.v v16, v8 +; LMULMAX8-NEXT: vmv.v.i v16, 0 ; LMULMAX8-NEXT: call vector_arg_indirect_stack@plt ; LMULMAX8-NEXT: addi sp, s0, -256 ; LMULMAX8-NEXT: ld s0, 240(sp) # 8-byte Folded Reload @@ -424,9 +424,9 @@ ; LMULMAX4-NEXT: addi t4, zero, 8 ; LMULMAX4-NEXT: vse32.v v8, (sp) ; LMULMAX4-NEXT: mv a0, zero -; LMULMAX4-NEXT: vmv4r.v v12, v8 -; LMULMAX4-NEXT: vmv4r.v v16, v8 -; LMULMAX4-NEXT: vmv4r.v v20, v8 +; LMULMAX4-NEXT: vmv.v.i v12, 0 +; LMULMAX4-NEXT: vmv.v.i v16, 0 +; LMULMAX4-NEXT: vmv.v.i v20, 0 ; LMULMAX4-NEXT: call vector_arg_indirect_stack@plt ; LMULMAX4-NEXT: addi sp, s0, -256 ; LMULMAX4-NEXT: ld s0, 240(sp) # 8-byte Folded Reload @@ -496,7 +496,7 @@ ; LMULMAX8-NEXT: addi t6, zero, 12 ; LMULMAX8-NEXT: sd a0, 0(sp) ; LMULMAX8-NEXT: mv a0, zero -; LMULMAX8-NEXT: vmv8r.v v16, v8 +; LMULMAX8-NEXT: vmv.v.i v16, 0 ; LMULMAX8-NEXT: call vector_arg_direct_stack@plt ; LMULMAX8-NEXT: ld ra, 152(sp) # 8-byte Folded Reload ; LMULMAX8-NEXT: addi sp, sp, 160 @@ -531,9 +531,9 @@ ; LMULMAX4-NEXT: addi t6, zero, 12 ; LMULMAX4-NEXT: vse32.v v8, (a0) ; LMULMAX4-NEXT: mv a0, zero -; LMULMAX4-NEXT: vmv4r.v v12, v8 -; LMULMAX4-NEXT: vmv4r.v v16, v8 -; LMULMAX4-NEXT: vmv4r.v v20, v8 +; LMULMAX4-NEXT: vmv.v.i v12, 0 +; LMULMAX4-NEXT: vmv.v.i v16, 0 +; LMULMAX4-NEXT: vmv.v.i v20, 0 ; LMULMAX4-NEXT: call vector_arg_direct_stack@plt ; LMULMAX4-NEXT: ld ra, 152(sp) # 8-byte Folded Reload ; LMULMAX4-NEXT: addi sp, sp, 160 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll @@ -797,7 +797,7 @@ ; LMULMAX8-NEXT: mv a0, sp ; LMULMAX8-NEXT: addi a2, zero, 42 ; LMULMAX8-NEXT: vse32.v v8, (sp) -; LMULMAX8-NEXT: vmv8r.v v8, v24 +; LMULMAX8-NEXT: vmv.v.v v8, v24 ; LMULMAX8-NEXT: call ext3@plt ; LMULMAX8-NEXT: addi sp, s0, -256 ; LMULMAX8-NEXT: ld s0, 240(sp) # 8-byte Folded Reload @@ -825,8 +825,8 @@ ; LMULMAX4-NEXT: mv a0, sp ; LMULMAX4-NEXT: addi a3, zero, 42 ; LMULMAX4-NEXT: vse32.v v8, 
(sp) -; LMULMAX4-NEXT: vmv4r.v v8, v28 -; LMULMAX4-NEXT: vmv4r.v v12, v24 +; LMULMAX4-NEXT: vmv.v.v v8, v28 +; LMULMAX4-NEXT: vmv.v.v v12, v24 ; LMULMAX4-NEXT: call ext3@plt ; LMULMAX4-NEXT: addi sp, s0, -256 ; LMULMAX4-NEXT: ld s0, 240(sp) # 8-byte Folded Reload @@ -862,10 +862,10 @@ ; LMULMAX2-NEXT: mv a0, sp ; LMULMAX2-NEXT: addi a5, zero, 42 ; LMULMAX2-NEXT: vse32.v v8, (sp) -; LMULMAX2-NEXT: vmv2r.v v8, v26 -; LMULMAX2-NEXT: vmv2r.v v10, v28 -; LMULMAX2-NEXT: vmv2r.v v12, v30 -; LMULMAX2-NEXT: vmv2r.v v14, v24 +; LMULMAX2-NEXT: vmv.v.v v8, v26 +; LMULMAX2-NEXT: vmv.v.v v10, v28 +; LMULMAX2-NEXT: vmv.v.v v12, v30 +; LMULMAX2-NEXT: vmv.v.v v14, v24 ; LMULMAX2-NEXT: call ext3@plt ; LMULMAX2-NEXT: addi sp, s0, -256 ; LMULMAX2-NEXT: ld s0, 240(sp) # 8-byte Folded Reload @@ -921,14 +921,14 @@ ; LMULMAX1-NEXT: addi a0, sp, 128 ; LMULMAX1-NEXT: addi a1, sp, 128 ; LMULMAX1-NEXT: vse32.v v8, (a1) -; LMULMAX1-NEXT: vmv1r.v v8, v25 -; LMULMAX1-NEXT: vmv1r.v v9, v26 -; LMULMAX1-NEXT: vmv1r.v v10, v27 -; LMULMAX1-NEXT: vmv1r.v v11, v28 -; LMULMAX1-NEXT: vmv1r.v v12, v29 -; LMULMAX1-NEXT: vmv1r.v v13, v30 -; LMULMAX1-NEXT: vmv1r.v v14, v31 -; LMULMAX1-NEXT: vmv1r.v v15, v24 +; LMULMAX1-NEXT: vmv.v.v v8, v25 +; LMULMAX1-NEXT: vmv.v.v v9, v26 +; LMULMAX1-NEXT: vmv.v.v v10, v27 +; LMULMAX1-NEXT: vmv.v.v v11, v28 +; LMULMAX1-NEXT: vmv.v.v v12, v29 +; LMULMAX1-NEXT: vmv.v.v v13, v30 +; LMULMAX1-NEXT: vmv.v.v v14, v31 +; LMULMAX1-NEXT: vmv.v.v v15, v24 ; LMULMAX1-NEXT: call ext3@plt ; LMULMAX1-NEXT: addi sp, s0, -384 ; LMULMAX1-NEXT: ld s0, 368(sp) # 8-byte Folded Reload @@ -1103,7 +1103,7 @@ ; LMULMAX2-NEXT: vmv1r.v v10, v8 ; LMULMAX2-NEXT: vmv1r.v v11, v8 ; LMULMAX2-NEXT: vmv1r.v v12, v8 -; LMULMAX2-NEXT: vmv2r.v v22, v14 +; LMULMAX2-NEXT: vmv.v.v v22, v14 ; LMULMAX2-NEXT: call split_vector_args@plt ; LMULMAX2-NEXT: addi sp, s0, -128 ; LMULMAX2-NEXT: ld s0, 112(sp) # 8-byte Folded Reload @@ -1154,9 +1154,9 @@ ; LMULMAX1-NEXT: vmv1r.v v10, v8 ; LMULMAX1-NEXT: vmv1r.v v11, v8 ; LMULMAX1-NEXT: vmv1r.v v12, v8 -; LMULMAX1-NEXT: vmv1r.v v21, v13 -; LMULMAX1-NEXT: vmv1r.v v22, v14 -; LMULMAX1-NEXT: vmv1r.v v23, v15 +; LMULMAX1-NEXT: vmv.v.v v21, v13 +; LMULMAX1-NEXT: vmv.v.v v22, v14 +; LMULMAX1-NEXT: vmv.v.v v23, v15 ; LMULMAX1-NEXT: call split_vector_args@plt ; LMULMAX1-NEXT: addi sp, s0, -128 ; LMULMAX1-NEXT: ld s0, 112(sp) # 8-byte Folded Reload @@ -1259,7 +1259,7 @@ ; LMULMAX8-NEXT: addi a7, zero, 7 ; LMULMAX8-NEXT: sd a0, 128(sp) ; LMULMAX8-NEXT: mv a0, zero -; LMULMAX8-NEXT: vmv8r.v v16, v8 +; LMULMAX8-NEXT: vmv.v.i v16, 0 ; LMULMAX8-NEXT: call vector_arg_via_stack@plt ; LMULMAX8-NEXT: ld ra, 136(sp) # 8-byte Folded Reload ; LMULMAX8-NEXT: addi sp, sp, 144 @@ -1286,9 +1286,9 @@ ; LMULMAX4-NEXT: addi a7, zero, 7 ; LMULMAX4-NEXT: vse32.v v8, (a0) ; LMULMAX4-NEXT: mv a0, zero -; LMULMAX4-NEXT: vmv4r.v v12, v8 -; LMULMAX4-NEXT: vmv4r.v v16, v8 -; LMULMAX4-NEXT: vmv4r.v v20, v8 +; LMULMAX4-NEXT: vmv.v.i v12, 0 +; LMULMAX4-NEXT: vmv.v.i v16, 0 +; LMULMAX4-NEXT: vmv.v.i v20, 0 ; LMULMAX4-NEXT: call vector_arg_via_stack@plt ; LMULMAX4-NEXT: ld ra, 136(sp) # 8-byte Folded Reload ; LMULMAX4-NEXT: addi sp, sp, 144 @@ -1319,13 +1319,13 @@ ; LMULMAX2-NEXT: addi a7, zero, 7 ; LMULMAX2-NEXT: vse32.v v8, (a0) ; LMULMAX2-NEXT: mv a0, zero -; LMULMAX2-NEXT: vmv2r.v v10, v8 -; LMULMAX2-NEXT: vmv2r.v v12, v8 -; LMULMAX2-NEXT: vmv2r.v v14, v8 -; LMULMAX2-NEXT: vmv2r.v v16, v8 -; LMULMAX2-NEXT: vmv2r.v v18, v8 -; LMULMAX2-NEXT: vmv2r.v v20, v8 -; LMULMAX2-NEXT: vmv2r.v v22, v8 +; LMULMAX2-NEXT: vmv.v.i v10, 0 +; 
LMULMAX2-NEXT: vmv.v.i v12, 0 +; LMULMAX2-NEXT: vmv.v.i v14, 0 +; LMULMAX2-NEXT: vmv.v.i v16, 0 +; LMULMAX2-NEXT: vmv.v.i v18, 0 +; LMULMAX2-NEXT: vmv.v.i v20, 0 +; LMULMAX2-NEXT: vmv.v.i v22, 0 ; LMULMAX2-NEXT: call vector_arg_via_stack@plt ; LMULMAX2-NEXT: ld ra, 136(sp) # 8-byte Folded Reload ; LMULMAX2-NEXT: addi sp, sp, 144 @@ -1364,21 +1364,21 @@ ; LMULMAX1-NEXT: addi a7, zero, 7 ; LMULMAX1-NEXT: vse32.v v8, (a0) ; LMULMAX1-NEXT: mv a0, zero -; LMULMAX1-NEXT: vmv1r.v v9, v8 -; LMULMAX1-NEXT: vmv1r.v v10, v8 -; LMULMAX1-NEXT: vmv1r.v v11, v8 -; LMULMAX1-NEXT: vmv1r.v v12, v8 -; LMULMAX1-NEXT: vmv1r.v v13, v8 -; LMULMAX1-NEXT: vmv1r.v v14, v8 -; LMULMAX1-NEXT: vmv1r.v v15, v8 -; LMULMAX1-NEXT: vmv1r.v v16, v8 -; LMULMAX1-NEXT: vmv1r.v v17, v8 -; LMULMAX1-NEXT: vmv1r.v v18, v8 -; LMULMAX1-NEXT: vmv1r.v v19, v8 -; LMULMAX1-NEXT: vmv1r.v v20, v8 -; LMULMAX1-NEXT: vmv1r.v v21, v8 -; LMULMAX1-NEXT: vmv1r.v v22, v8 -; LMULMAX1-NEXT: vmv1r.v v23, v8 +; LMULMAX1-NEXT: vmv.v.i v9, 0 +; LMULMAX1-NEXT: vmv.v.i v10, 0 +; LMULMAX1-NEXT: vmv.v.i v11, 0 +; LMULMAX1-NEXT: vmv.v.i v12, 0 +; LMULMAX1-NEXT: vmv.v.i v13, 0 +; LMULMAX1-NEXT: vmv.v.i v14, 0 +; LMULMAX1-NEXT: vmv.v.i v15, 0 +; LMULMAX1-NEXT: vmv.v.i v16, 0 +; LMULMAX1-NEXT: vmv.v.i v17, 0 +; LMULMAX1-NEXT: vmv.v.i v18, 0 +; LMULMAX1-NEXT: vmv.v.i v19, 0 +; LMULMAX1-NEXT: vmv.v.i v20, 0 +; LMULMAX1-NEXT: vmv.v.i v21, 0 +; LMULMAX1-NEXT: vmv.v.i v22, 0 +; LMULMAX1-NEXT: vmv.v.i v23, 0 ; LMULMAX1-NEXT: call vector_arg_via_stack@plt ; LMULMAX1-NEXT: ld ra, 136(sp) # 8-byte Folded Reload ; LMULMAX1-NEXT: addi sp, sp, 144 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll @@ -89,7 +89,7 @@ ; RV32-NEXT: vle16.v v25, (a0) ; RV32-NEXT: vsetvli zero, zero, e64,m2,ta,mu ; RV32-NEXT: vrgatherei16.vv v26, v8, v25 -; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: vmv.v.v v8, v26 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_permute_shuffle_vu_v4f64: @@ -99,7 +99,7 @@ ; RV64-NEXT: vsetivli zero, 4, e64,m2,ta,mu ; RV64-NEXT: vle64.v v28, (a0) ; RV64-NEXT: vrgather.vv v26, v8, v28 -; RV64-NEXT: vmv2r.v v8, v26 +; RV64-NEXT: vmv.v.v v8, v26 ; RV64-NEXT: ret %s = shufflevector <4 x double> %x, <4 x double> undef, <4 x i32> ret <4 x double> %s @@ -114,7 +114,7 @@ ; RV32-NEXT: vle16.v v25, (a0) ; RV32-NEXT: vsetvli zero, zero, e64,m2,ta,mu ; RV32-NEXT: vrgatherei16.vv v26, v8, v25 -; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: vmv.v.v v8, v26 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_permute_shuffle_uv_v4f64: @@ -124,7 +124,7 @@ ; RV64-NEXT: vsetivli zero, 4, e64,m2,ta,mu ; RV64-NEXT: vle64.v v28, (a0) ; RV64-NEXT: vrgather.vv v26, v8, v28 -; RV64-NEXT: vmv2r.v v8, v26 +; RV64-NEXT: vmv.v.v v8, v26 ; RV64-NEXT: ret %s = shufflevector <4 x double> undef, <4 x double> %x, <4 x i32> ret <4 x double> %s diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll @@ -156,7 +156,7 @@ ; RV32-NEXT: vle16.v v25, (a0) ; RV32-NEXT: vsetvli zero, zero, e64,m4,ta,mu ; RV32-NEXT: vrgatherei16.vv v28, v8, v25 -; RV32-NEXT: vmv4r.v v8, v28 +; RV32-NEXT: vmv.v.v v8, v28 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_permute_shuffle_vu_v8i64: @@ -166,7 +166,7 @@ ; RV64-NEXT: vsetivli zero, 8, 
e64,m4,ta,mu ; RV64-NEXT: vle64.v v12, (a0) ; RV64-NEXT: vrgather.vv v28, v8, v12 -; RV64-NEXT: vmv4r.v v8, v28 +; RV64-NEXT: vmv.v.v v8, v28 ; RV64-NEXT: ret %s = shufflevector <8 x i64> %x, <8 x i64> undef, <8 x i32> ret <8 x i64> %s @@ -181,7 +181,7 @@ ; RV32-NEXT: vle16.v v25, (a0) ; RV32-NEXT: vsetvli zero, zero, e64,m4,ta,mu ; RV32-NEXT: vrgatherei16.vv v28, v8, v25 -; RV32-NEXT: vmv4r.v v8, v28 +; RV32-NEXT: vmv.v.v v8, v28 ; RV32-NEXT: ret ; ; RV64-LABEL: vrgather_permute_shuffle_uv_v8i64: @@ -191,7 +191,7 @@ ; RV64-NEXT: vsetivli zero, 8, e64,m4,ta,mu ; RV64-NEXT: vle64.v v12, (a0) ; RV64-NEXT: vrgather.vv v28, v8, v12 -; RV64-NEXT: vmv4r.v v8, v28 +; RV64-NEXT: vmv.v.v v8, v28 ; RV64-NEXT: ret %s = shufflevector <8 x i64> undef, <8 x i64> %x, <8 x i32> ret <8 x i64> %s diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll @@ -954,7 +954,7 @@ ; CHECK-NEXT: vsetivli zero, 8, e16,m1,ta,mu ; CHECK-NEXT: vmv.v.i v28, 0 ; CHECK-NEXT: lui a1, 1048568 -; CHECK-NEXT: vmv1r.v v29, v28 +; CHECK-NEXT: vmv.v.i v29, 0 ; CHECK-NEXT: vmv.s.x v29, a1 ; CHECK-NEXT: vsetivli zero, 7, e16,m1,tu,mu ; CHECK-NEXT: vslideup.vi v28, v26, 6 diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll @@ -680,7 +680,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e32,m1,ta,mu ; RV64-NEXT: vloxei64.v v25, (zero), v8 -; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: vmv.v.v v8, v25 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -965,7 +965,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e64,m2,ta,mu ; RV32-NEXT: vloxei32.v v26, (zero), v8 -; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: vmv.v.v v8, v26 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_v4i64: @@ -1553,7 +1553,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetivli zero, 4, e32,m1,ta,mu ; RV64-NEXT: vloxei64.v v25, (zero), v8 -; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: vmv.v.v v8, v25 ; RV64-NEXT: ret %mhead = insertelement <4 x i1> undef, i1 1, i32 0 %mtrue = shufflevector <4 x i1> %mhead, <4 x i1> undef, <4 x i32> zeroinitializer @@ -1838,7 +1838,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetivli zero, 4, e64,m2,ta,mu ; RV32-NEXT: vloxei32.v v26, (zero), v8 -; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: vmv.v.v v8, v26 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_v4f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll @@ -135,7 +135,7 @@ ; RV32-NEXT: vloxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64,m2,ta,mu ; RV32-NEXT: vsext.vf8 v26, v9 -; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: vmv.v.v v8, v26 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i8_sextload_nxv2i64: @@ -157,7 +157,7 @@ ; RV32-NEXT: vloxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64,m2,ta,mu ; RV32-NEXT: vzext.vf8 v26, v9 -; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: vmv.v.v v8, v26 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i8_zextload_nxv2i64: @@ -358,7 +358,7 @@ ; RV32-NEXT: vloxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli 
zero, zero, e64,m2,ta,mu ; RV32-NEXT: vsext.vf4 v26, v9 -; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: vmv.v.v v8, v26 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i16_sextload_nxv2i64: @@ -380,7 +380,7 @@ ; RV32-NEXT: vloxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64,m2,ta,mu ; RV32-NEXT: vzext.vf4 v26, v9 -; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: vmv.v.v v8, v26 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i16_zextload_nxv2i64: @@ -420,14 +420,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; RV32-NEXT: vloxei32.v v25, (zero), v8 -; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: vmv.v.v v8, v25 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4i16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; RV64-NEXT: vloxei64.v v25, (zero), v8 -; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: vmv.v.v v8, v25 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -618,7 +618,7 @@ ; RV32-NEXT: vloxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64,m2,ta,mu ; RV32-NEXT: vsext.vf2 v26, v9 -; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: vmv.v.v v8, v26 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i32_sextload_nxv2i64: @@ -640,7 +640,7 @@ ; RV32-NEXT: vloxei32.v v9, (zero), v8, v0.t ; RV32-NEXT: vsetvli zero, zero, e64,m2,ta,mu ; RV32-NEXT: vzext.vf2 v26, v9 -; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: vmv.v.v v8, v26 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_nxv2i32_zextload_nxv2i64: @@ -686,7 +686,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32,m2,ta,mu ; RV64-NEXT: vloxei64.v v26, (zero), v8 -; RV64-NEXT: vmv2r.v v8, v26 +; RV64-NEXT: vmv.v.v v8, v26 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -971,7 +971,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64,m4,ta,mu ; RV32-NEXT: vloxei32.v v28, (zero), v8 -; RV32-NEXT: vmv4r.v v8, v28 +; RV32-NEXT: vmv.v.v v8, v28 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4i64: @@ -1403,14 +1403,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; RV32-NEXT: vloxei32.v v25, (zero), v8 -; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: vmv.v.v v8, v25 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; RV64-NEXT: vloxei64.v v25, (zero), v8 -; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: vmv.v.v v8, v25 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -1625,7 +1625,7 @@ ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32,m2,ta,mu ; RV64-NEXT: vloxei64.v v26, (zero), v8 -; RV64-NEXT: vmv2r.v v8, v26 +; RV64-NEXT: vmv.v.v v8, v26 ; RV64-NEXT: ret %mhead = insertelement undef, i1 1, i32 0 %mtrue = shufflevector %mhead, undef, zeroinitializer @@ -1910,7 +1910,7 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e64,m4,ta,mu ; RV32-NEXT: vloxei32.v v28, (zero), v8 -; RV32-NEXT: vmv4r.v v8, v28 +; RV32-NEXT: vmv.v.v v8, v28 ; RV32-NEXT: ret ; ; RV64-LABEL: mgather_truemask_nxv4f64: diff --git a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll --- a/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll @@ -290,7 +290,7 @@ ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v26, v26, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v25, v8, v26 -; 
RV32-BITS-UNKNOWN-NEXT: vmv1r.v v8, v25 +; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v25 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv8i8: @@ -301,7 +301,7 @@ ; RV32-BITS-256-NEXT: vid.v v25 ; RV32-BITS-256-NEXT: vrsub.vx v26, v25, a0 ; RV32-BITS-256-NEXT: vrgather.vv v25, v8, v26 -; RV32-BITS-256-NEXT: vmv1r.v v8, v25 +; RV32-BITS-256-NEXT: vmv.v.v v8, v25 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv8i8: @@ -312,7 +312,7 @@ ; RV32-BITS-512-NEXT: vid.v v25 ; RV32-BITS-512-NEXT: vrsub.vx v26, v25, a0 ; RV32-BITS-512-NEXT: vrgather.vv v25, v8, v26 -; RV32-BITS-512-NEXT: vmv1r.v v8, v25 +; RV32-BITS-512-NEXT: vmv.v.v v8, v25 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv8i8: @@ -324,7 +324,7 @@ ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v26, v26, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8,m1,ta,mu ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v25, v8, v26 -; RV64-BITS-UNKNOWN-NEXT: vmv1r.v v8, v25 +; RV64-BITS-UNKNOWN-NEXT: vmv.v.v v8, v25 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv8i8: @@ -335,7 +335,7 @@ ; RV64-BITS-256-NEXT: vid.v v25 ; RV64-BITS-256-NEXT: vrsub.vx v26, v25, a0 ; RV64-BITS-256-NEXT: vrgather.vv v25, v8, v26 -; RV64-BITS-256-NEXT: vmv1r.v v8, v25 +; RV64-BITS-256-NEXT: vmv.v.v v8, v25 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv8i8: @@ -346,7 +346,7 @@ ; RV64-BITS-512-NEXT: vid.v v25 ; RV64-BITS-512-NEXT: vrsub.vx v26, v25, a0 ; RV64-BITS-512-NEXT: vrgather.vv v25, v8, v26 -; RV64-BITS-512-NEXT: vmv1r.v v8, v25 +; RV64-BITS-512-NEXT: vmv.v.v v8, v25 ; RV64-BITS-512-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8i8( %a) ret %res @@ -363,7 +363,7 @@ ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v28, v28, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8,m2,ta,mu ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v26, v8, v28 -; RV32-BITS-UNKNOWN-NEXT: vmv2r.v v8, v26 +; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v26 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv16i8: @@ -375,7 +375,7 @@ ; RV32-BITS-256-NEXT: vid.v v26 ; RV32-BITS-256-NEXT: vrsub.vx v28, v26, a0 ; RV32-BITS-256-NEXT: vrgather.vv v26, v8, v28 -; RV32-BITS-256-NEXT: vmv2r.v v8, v26 +; RV32-BITS-256-NEXT: vmv.v.v v8, v26 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv16i8: @@ -387,7 +387,7 @@ ; RV32-BITS-512-NEXT: vid.v v26 ; RV32-BITS-512-NEXT: vrsub.vx v28, v26, a0 ; RV32-BITS-512-NEXT: vrgather.vv v26, v8, v28 -; RV32-BITS-512-NEXT: vmv2r.v v8, v26 +; RV32-BITS-512-NEXT: vmv.v.v v8, v26 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv16i8: @@ -400,7 +400,7 @@ ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v28, v28, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8,m2,ta,mu ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v26, v8, v28 -; RV64-BITS-UNKNOWN-NEXT: vmv2r.v v8, v26 +; RV64-BITS-UNKNOWN-NEXT: vmv.v.v v8, v26 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv16i8: @@ -412,7 +412,7 @@ ; RV64-BITS-256-NEXT: vid.v v26 ; RV64-BITS-256-NEXT: vrsub.vx v28, v26, a0 ; RV64-BITS-256-NEXT: vrgather.vv v26, v8, v28 -; RV64-BITS-256-NEXT: vmv2r.v v8, v26 +; RV64-BITS-256-NEXT: vmv.v.v v8, v26 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv16i8: @@ -424,7 +424,7 @@ ; RV64-BITS-512-NEXT: vid.v v26 ; RV64-BITS-512-NEXT: vrsub.vx v28, v26, a0 ; RV64-BITS-512-NEXT: vrgather.vv v26, v8, v28 -; RV64-BITS-512-NEXT: vmv2r.v v8, v26 +; RV64-BITS-512-NEXT: vmv.v.v v8, v26 ; RV64-BITS-512-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv16i8( %a) ret %res @@ -441,7 +441,7 
@@ ; RV32-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0 ; RV32-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8,m4,ta,mu ; RV32-BITS-UNKNOWN-NEXT: vrgatherei16.vv v28, v8, v16 -; RV32-BITS-UNKNOWN-NEXT: vmv4r.v v8, v28 +; RV32-BITS-UNKNOWN-NEXT: vmv.v.v v8, v28 ; RV32-BITS-UNKNOWN-NEXT: ret ; ; RV32-BITS-256-LABEL: reverse_nxv32i8: @@ -453,7 +453,7 @@ ; RV32-BITS-256-NEXT: vid.v v28 ; RV32-BITS-256-NEXT: vrsub.vx v12, v28, a0 ; RV32-BITS-256-NEXT: vrgather.vv v28, v8, v12 -; RV32-BITS-256-NEXT: vmv4r.v v8, v28 +; RV32-BITS-256-NEXT: vmv.v.v v8, v28 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv32i8: @@ -465,7 +465,7 @@ ; RV32-BITS-512-NEXT: vid.v v28 ; RV32-BITS-512-NEXT: vrsub.vx v12, v28, a0 ; RV32-BITS-512-NEXT: vrgather.vv v28, v8, v12 -; RV32-BITS-512-NEXT: vmv4r.v v8, v28 +; RV32-BITS-512-NEXT: vmv.v.v v8, v28 ; RV32-BITS-512-NEXT: ret ; ; RV64-BITS-UNKNOWN-LABEL: reverse_nxv32i8: @@ -478,7 +478,7 @@ ; RV64-BITS-UNKNOWN-NEXT: vrsub.vx v16, v16, a0 ; RV64-BITS-UNKNOWN-NEXT: vsetvli zero, zero, e8,m4,ta,mu ; RV64-BITS-UNKNOWN-NEXT: vrgatherei16.vv v28, v8, v16 -; RV64-BITS-UNKNOWN-NEXT: vmv4r.v v8, v28 +; RV64-BITS-UNKNOWN-NEXT: vmv.v.v v8, v28 ; RV64-BITS-UNKNOWN-NEXT: ret ; ; RV64-BITS-256-LABEL: reverse_nxv32i8: @@ -490,7 +490,7 @@ ; RV64-BITS-256-NEXT: vid.v v28 ; RV64-BITS-256-NEXT: vrsub.vx v12, v28, a0 ; RV64-BITS-256-NEXT: vrgather.vv v28, v8, v12 -; RV64-BITS-256-NEXT: vmv4r.v v8, v28 +; RV64-BITS-256-NEXT: vmv.v.v v8, v28 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv32i8: @@ -502,7 +502,7 @@ ; RV64-BITS-512-NEXT: vid.v v28 ; RV64-BITS-512-NEXT: vrsub.vx v12, v28, a0 ; RV64-BITS-512-NEXT: vrgather.vv v28, v8, v12 -; RV64-BITS-512-NEXT: vmv4r.v v8, v28 +; RV64-BITS-512-NEXT: vmv.v.v v8, v28 ; RV64-BITS-512-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv32i8( %a) ret %res @@ -532,7 +532,7 @@ ; RV32-BITS-256-NEXT: vid.v v16 ; RV32-BITS-256-NEXT: vrsub.vx v24, v16, a0 ; RV32-BITS-256-NEXT: vrgather.vv v16, v8, v24 -; RV32-BITS-256-NEXT: vmv8r.v v8, v16 +; RV32-BITS-256-NEXT: vmv.v.v v8, v16 ; RV32-BITS-256-NEXT: ret ; ; RV32-BITS-512-LABEL: reverse_nxv64i8: @@ -571,7 +571,7 @@ ; RV64-BITS-256-NEXT: vid.v v16 ; RV64-BITS-256-NEXT: vrsub.vx v24, v16, a0 ; RV64-BITS-256-NEXT: vrgather.vv v16, v8, v24 -; RV64-BITS-256-NEXT: vmv8r.v v8, v16 +; RV64-BITS-256-NEXT: vmv.v.v v8, v16 ; RV64-BITS-256-NEXT: ret ; ; RV64-BITS-512-LABEL: reverse_nxv64i8: @@ -632,7 +632,7 @@ ; CHECK-NEXT: vid.v v25 ; CHECK-NEXT: vrsub.vx v26, v25, a0 ; CHECK-NEXT: vrgather.vv v25, v8, v26 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4i16( %a) ret %res @@ -647,7 +647,7 @@ ; CHECK-NEXT: vid.v v26 ; CHECK-NEXT: vrsub.vx v28, v26, a0 ; CHECK-NEXT: vrgather.vv v26, v8, v28 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8i16( %a) ret %res @@ -663,7 +663,7 @@ ; CHECK-NEXT: vid.v v28 ; CHECK-NEXT: vrsub.vx v12, v28, a0 ; CHECK-NEXT: vrgather.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv16i16( %a) ret %res @@ -679,7 +679,7 @@ ; CHECK-NEXT: vid.v v16 ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv32i16( %a) ret %res @@ -711,7 +711,7 @@ ; CHECK-NEXT: vid.v v25 ; CHECK-NEXT: 
vrsub.vx v26, v25, a0 ; CHECK-NEXT: vrgather.vv v25, v8, v26 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv2i32( %a) ret %res @@ -727,7 +727,7 @@ ; CHECK-NEXT: vid.v v26 ; CHECK-NEXT: vrsub.vx v28, v26, a0 ; CHECK-NEXT: vrgather.vv v26, v8, v28 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4i32( %a) ret %res @@ -742,7 +742,7 @@ ; CHECK-NEXT: vid.v v28 ; CHECK-NEXT: vrsub.vx v12, v28, a0 ; CHECK-NEXT: vrgather.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8i32( %a) ret %res @@ -758,7 +758,7 @@ ; CHECK-NEXT: vid.v v16 ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv16i32( %a) ret %res @@ -774,7 +774,7 @@ ; CHECK-NEXT: vid.v v25 ; CHECK-NEXT: vrsub.vx v26, v25, a0 ; CHECK-NEXT: vrgather.vv v25, v8, v26 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv1i64( %a) ret %res @@ -790,7 +790,7 @@ ; CHECK-NEXT: vid.v v26 ; CHECK-NEXT: vrsub.vx v28, v26, a0 ; CHECK-NEXT: vrgather.vv v26, v8, v28 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv2i64( %a) ret %res @@ -806,7 +806,7 @@ ; CHECK-NEXT: vid.v v28 ; CHECK-NEXT: vrsub.vx v12, v28, a0 ; CHECK-NEXT: vrgather.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4i64( %a) ret %res @@ -821,7 +821,7 @@ ; CHECK-NEXT: vid.v v16 ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8i64( %a) ret %res @@ -873,7 +873,7 @@ ; CHECK-NEXT: vid.v v25 ; CHECK-NEXT: vrsub.vx v26, v25, a0 ; CHECK-NEXT: vrgather.vv v25, v8, v26 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4f16( %a) ret %res @@ -888,7 +888,7 @@ ; CHECK-NEXT: vid.v v26 ; CHECK-NEXT: vrsub.vx v28, v26, a0 ; CHECK-NEXT: vrgather.vv v26, v8, v28 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8f16( %a) ret %res @@ -904,7 +904,7 @@ ; CHECK-NEXT: vid.v v28 ; CHECK-NEXT: vrsub.vx v12, v28, a0 ; CHECK-NEXT: vrgather.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv16f16( %a) ret %res @@ -920,7 +920,7 @@ ; CHECK-NEXT: vid.v v16 ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv32f16( %a) ret %res @@ -952,7 +952,7 @@ ; CHECK-NEXT: vid.v v25 ; CHECK-NEXT: vrsub.vx v26, v25, a0 ; CHECK-NEXT: vrgather.vv v25, v8, v26 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv2f32( %a) ret %res @@ -968,7 +968,7 @@ ; CHECK-NEXT: vid.v v26 ; CHECK-NEXT: vrsub.vx v28, v26, a0 ; CHECK-NEXT: vrgather.vv v26, v8, v28 -; CHECK-NEXT: 
vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4f32( %a) ret %res @@ -983,7 +983,7 @@ ; CHECK-NEXT: vid.v v28 ; CHECK-NEXT: vrsub.vx v12, v28, a0 ; CHECK-NEXT: vrgather.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8f32( %a) ret %res @@ -999,7 +999,7 @@ ; CHECK-NEXT: vid.v v16 ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv16f32( %a) ret %res @@ -1015,7 +1015,7 @@ ; CHECK-NEXT: vid.v v25 ; CHECK-NEXT: vrsub.vx v26, v25, a0 ; CHECK-NEXT: vrgather.vv v25, v8, v26 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv1f64( %a) ret %res @@ -1031,7 +1031,7 @@ ; CHECK-NEXT: vid.v v26 ; CHECK-NEXT: vrsub.vx v28, v26, a0 ; CHECK-NEXT: vrgather.vv v26, v8, v28 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv2f64( %a) ret %res @@ -1047,7 +1047,7 @@ ; CHECK-NEXT: vid.v v28 ; CHECK-NEXT: vrsub.vx v12, v28, a0 ; CHECK-NEXT: vrgather.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv4f64( %a) ret %res @@ -1062,7 +1062,7 @@ ; CHECK-NEXT: vid.v v16 ; CHECK-NEXT: vrsub.vx v24, v16, a0 ; CHECK-NEXT: vrgather.vv v16, v8, v24 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %res = call @llvm.experimental.vector.reverse.nxv8f64( %a) ret %res diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll --- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll @@ -66,7 +66,7 @@ ; SPILL-O0-NEXT: sub sp, sp, a2 ; SPILL-O0-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; SPILL-O0-NEXT: vlseg2e32.v v0, (a0) -; SPILL-O0-NEXT: vmv1r.v v25, v1 +; SPILL-O0-NEXT: vmv.v.v v25, v1 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs1r.v v25, (a0) # Unknown-size Folded Spill ; SPILL-O0-NEXT: #APP @@ -121,7 +121,7 @@ ; SPILL-O0-NEXT: sub sp, sp, a2 ; SPILL-O0-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; SPILL-O0-NEXT: vlseg2e32.v v0, (a0) -; SPILL-O0-NEXT: vmv2r.v v26, v2 +; SPILL-O0-NEXT: vmv.v.v v26, v2 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs2r.v v26, (a0) # Unknown-size Folded Spill ; SPILL-O0-NEXT: #APP @@ -179,7 +179,7 @@ ; SPILL-O0-NEXT: sub sp, sp, a2 ; SPILL-O0-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; SPILL-O0-NEXT: vlseg2e32.v v0, (a0) -; SPILL-O0-NEXT: vmv4r.v v28, v4 +; SPILL-O0-NEXT: vmv.v.v v28, v4 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs4r.v v28, (a0) # Unknown-size Folded Spill ; SPILL-O0-NEXT: #APP @@ -237,7 +237,7 @@ ; SPILL-O0-NEXT: sub sp, sp, a2 ; SPILL-O0-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; SPILL-O0-NEXT: vlseg3e32.v v0, (a0) -; SPILL-O0-NEXT: vmv2r.v v26, v2 +; SPILL-O0-NEXT: vmv.v.v v26, v2 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs2r.v v26, (a0) # Unknown-size Folded Spill ; SPILL-O0-NEXT: #APP diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll --- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll +++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll @@ -66,7 +66,7 @@ ; SPILL-O0-NEXT: sub sp, sp, a2 ; SPILL-O0-NEXT: 
vsetvli zero, a1, e32,m1,ta,mu ; SPILL-O0-NEXT: vlseg2e32.v v0, (a0) -; SPILL-O0-NEXT: vmv1r.v v25, v1 +; SPILL-O0-NEXT: vmv.v.v v25, v1 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs1r.v v25, (a0) # Unknown-size Folded Spill ; SPILL-O0-NEXT: #APP @@ -121,7 +121,7 @@ ; SPILL-O0-NEXT: sub sp, sp, a2 ; SPILL-O0-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; SPILL-O0-NEXT: vlseg2e32.v v0, (a0) -; SPILL-O0-NEXT: vmv2r.v v26, v2 +; SPILL-O0-NEXT: vmv.v.v v26, v2 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs2r.v v26, (a0) # Unknown-size Folded Spill ; SPILL-O0-NEXT: #APP @@ -179,7 +179,7 @@ ; SPILL-O0-NEXT: sub sp, sp, a2 ; SPILL-O0-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; SPILL-O0-NEXT: vlseg2e32.v v0, (a0) -; SPILL-O0-NEXT: vmv4r.v v28, v4 +; SPILL-O0-NEXT: vmv.v.v v28, v4 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs4r.v v28, (a0) # Unknown-size Folded Spill ; SPILL-O0-NEXT: #APP @@ -237,7 +237,7 @@ ; SPILL-O0-NEXT: sub sp, sp, a2 ; SPILL-O0-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; SPILL-O0-NEXT: vlseg3e32.v v0, (a0) -; SPILL-O0-NEXT: vmv2r.v v26, v2 +; SPILL-O0-NEXT: vmv.v.v v26, v2 ; SPILL-O0-NEXT: addi a0, sp, 16 ; SPILL-O0-NEXT: vs2r.v v26, (a0) # Unknown-size Folded Spill ; SPILL-O0-NEXT: #APP diff --git a/llvm/test/CodeGen/RISCV/rvv/unsupported-calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/unsupported-calling-conv.ll --- a/llvm/test/CodeGen/RISCV/rvv/unsupported-calling-conv.ll +++ b/llvm/test/CodeGen/RISCV/rvv/unsupported-calling-conv.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: not --crash llc -mtriple=riscv64 -mattr=+experimental-v < %s 2>&1 | FileCheck %s ; A rather pathological test case in which we exhaust all vector registers and diff --git a/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode-rv32.ll @@ -50,7 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu ; CHECK-NEXT: vsext.vf8 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -61,7 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu ; CHECK-NEXT: vzext.vf8 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -94,7 +94,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu ; CHECK-NEXT: vsext.vf4 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -105,7 +105,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu ; CHECK-NEXT: vzext.vf4 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -116,7 +116,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu ; CHECK-NEXT: vsext.vf8 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -127,7 +127,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu ; CHECK-NEXT: vzext.vf8 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -138,7 +138,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; CHECK-NEXT: vsext.vf2 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -149,7 +149,7 @@ ; 
CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; CHECK-NEXT: vzext.vf2 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -160,7 +160,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu ; CHECK-NEXT: vsext.vf4 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -171,7 +171,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu ; CHECK-NEXT: vzext.vf4 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -182,7 +182,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu ; CHECK-NEXT: vsext.vf8 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -193,7 +193,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu ; CHECK-NEXT: vzext.vf8 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -204,7 +204,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vsext.vf2 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -215,7 +215,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vzext.vf2 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -226,7 +226,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vsext.vf4 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -237,7 +237,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vzext.vf4 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -248,7 +248,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vsext.vf8 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -259,7 +259,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vzext.vf8 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -270,7 +270,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu ; CHECK-NEXT: vsext.vf2 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -281,7 +281,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu ; CHECK-NEXT: vzext.vf2 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -292,7 +292,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu ; CHECK-NEXT: vsext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -303,7 +303,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu ; CHECK-NEXT: vzext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -314,7 +314,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu ; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec 
= sext %va to ret %evec @@ -325,7 +325,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu ; CHECK-NEXT: vzext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -358,7 +358,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu ; CHECK-NEXT: vsext.vf4 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -369,7 +369,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu ; CHECK-NEXT: vzext.vf4 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -380,7 +380,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu ; CHECK-NEXT: vsext.vf2 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -391,7 +391,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu ; CHECK-NEXT: vzext.vf2 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -402,7 +402,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu ; CHECK-NEXT: vsext.vf4 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -413,7 +413,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu ; CHECK-NEXT: vzext.vf4 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -424,7 +424,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu ; CHECK-NEXT: vsext.vf2 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -435,7 +435,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu ; CHECK-NEXT: vzext.vf2 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -446,7 +446,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu ; CHECK-NEXT: vsext.vf4 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -457,7 +457,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu ; CHECK-NEXT: vzext.vf4 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -468,7 +468,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vsext.vf2 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -479,7 +479,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vzext.vf2 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -490,7 +490,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vsext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -501,7 +501,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vzext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -512,7 +512,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu ; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; 
CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -523,7 +523,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu ; CHECK-NEXT: vzext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -534,7 +534,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu ; CHECK-NEXT: vsext.vf2 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -545,7 +545,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu ; CHECK-NEXT: vzext.vf2 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -556,7 +556,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu ; CHECK-NEXT: vsext.vf2 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -567,7 +567,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu ; CHECK-NEXT: vzext.vf2 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -578,7 +578,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu ; CHECK-NEXT: vsext.vf2 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -589,7 +589,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu ; CHECK-NEXT: vzext.vf2 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -600,7 +600,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -611,7 +611,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vzext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = zext %va to ret %evec diff --git a/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vexts-sdnode-rv64.ll @@ -50,7 +50,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu ; CHECK-NEXT: vsext.vf8 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -61,7 +61,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu ; CHECK-NEXT: vzext.vf8 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -94,7 +94,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu ; CHECK-NEXT: vsext.vf4 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -105,7 +105,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu ; CHECK-NEXT: vzext.vf4 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -116,7 +116,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu ; CHECK-NEXT: vsext.vf8 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -127,7 +127,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu ; CHECK-NEXT: vzext.vf8 v26, 
v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -138,7 +138,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; CHECK-NEXT: vsext.vf2 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -149,7 +149,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; CHECK-NEXT: vzext.vf2 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -160,7 +160,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu ; CHECK-NEXT: vsext.vf4 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -171,7 +171,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu ; CHECK-NEXT: vzext.vf4 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -182,7 +182,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu ; CHECK-NEXT: vsext.vf8 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -193,7 +193,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu ; CHECK-NEXT: vzext.vf8 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -204,7 +204,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vsext.vf2 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -215,7 +215,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vzext.vf2 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -226,7 +226,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vsext.vf4 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -237,7 +237,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vzext.vf4 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -248,7 +248,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vsext.vf8 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -259,7 +259,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vzext.vf8 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -270,7 +270,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu ; CHECK-NEXT: vsext.vf2 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -281,7 +281,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu ; CHECK-NEXT: vzext.vf2 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -292,7 +292,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu ; CHECK-NEXT: vsext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -303,7 +303,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, 
zero, e32,m8,ta,mu ; CHECK-NEXT: vzext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -314,7 +314,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu ; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -325,7 +325,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu ; CHECK-NEXT: vzext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -358,7 +358,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu ; CHECK-NEXT: vsext.vf4 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -369,7 +369,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu ; CHECK-NEXT: vzext.vf4 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -380,7 +380,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu ; CHECK-NEXT: vsext.vf2 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -391,7 +391,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu ; CHECK-NEXT: vzext.vf2 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -402,7 +402,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu ; CHECK-NEXT: vsext.vf4 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -413,7 +413,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu ; CHECK-NEXT: vzext.vf4 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -424,7 +424,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu ; CHECK-NEXT: vsext.vf2 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -435,7 +435,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu ; CHECK-NEXT: vzext.vf2 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -446,7 +446,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu ; CHECK-NEXT: vsext.vf4 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -457,7 +457,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu ; CHECK-NEXT: vzext.vf4 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -468,7 +468,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vsext.vf2 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -479,7 +479,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vzext.vf2 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -490,7 +490,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vsext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -501,7 +501,7 
@@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vzext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -512,7 +512,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu ; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -523,7 +523,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu ; CHECK-NEXT: vzext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -534,7 +534,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu ; CHECK-NEXT: vsext.vf2 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -545,7 +545,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu ; CHECK-NEXT: vzext.vf2 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -556,7 +556,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu ; CHECK-NEXT: vsext.vf2 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -567,7 +567,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu ; CHECK-NEXT: vzext.vf2 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -578,7 +578,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu ; CHECK-NEXT: vsext.vf2 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -589,7 +589,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu ; CHECK-NEXT: vzext.vf2 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = zext %va to ret %evec @@ -600,7 +600,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = sext %va to ret %evec @@ -611,7 +611,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu ; CHECK-NEXT: vzext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret %evec = zext %va to ret %evec diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll @@ -92,7 +92,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.f.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vfncvt.f.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( @@ -174,7 +174,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.f.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( @@ -256,7 +256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, 
e32,m1,ta,mu ; CHECK-NEXT: vfncvt.f.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64( @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.f.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64( @@ -338,7 +338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.f.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll @@ -92,7 +92,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.f.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vfncvt.f.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( @@ -174,7 +174,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.f.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( @@ -256,7 +256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vfncvt.f.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64( @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.f.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64( @@ -338,7 +338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.f.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll @@ -92,7 +92,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.f.x.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32( @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vfncvt.f.x.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32( @@ -174,7 +174,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.f.x.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( @@ -256,7 
+256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vfncvt.f.x.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64( @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.f.x.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64( @@ -338,7 +338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.f.x.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll @@ -92,7 +92,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.f.x.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32( @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vfncvt.f.x.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32( @@ -174,7 +174,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.f.x.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( @@ -256,7 +256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vfncvt.f.x.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64( @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.f.x.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64( @@ -338,7 +338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.f.x.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll @@ -92,7 +92,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.f.xu.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vfncvt.f.xu.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( @@ -174,7 +174,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.f.xu.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( @@ -256,7 +256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vfncvt.f.xu.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64( @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.f.xu.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64( @@ -338,7 +338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.f.xu.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll @@ -92,7 +92,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.f.xu.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vfncvt.f.xu.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( @@ -174,7 +174,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.f.xu.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( @@ -256,7 +256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vfncvt.f.xu.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64( @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.f.xu.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64( @@ -338,7 +338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.f.xu.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll @@ -92,7 +92,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.rod.f.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32( @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vfncvt.rod.f.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32( @@ -174,7 +174,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.rod.f.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( @@ -256,7 +256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vfncvt.rod.f.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.rod.f.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64( @@ -338,7 +338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.rod.f.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll @@ -92,7 +92,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.rod.f.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32( @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vfncvt.rod.f.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32( @@ -174,7 +174,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.rod.f.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( @@ -256,7 +256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vfncvt.rod.f.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.rod.f.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64( @@ -338,7 +338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.rod.f.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( @@ -174,7 +174,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v26, v8 -; CHECK-NEXT: 
vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( @@ -215,7 +215,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( @@ -338,7 +338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32( @@ -379,7 +379,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32( @@ -420,7 +420,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32( @@ -502,7 +502,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64( @@ -543,7 +543,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64( @@ -584,7 +584,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( @@ -174,7 +174,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( @@ -215,7 +215,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( @@ -338,7 +338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32( @@ -379,7 +379,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: 
vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32( @@ -420,7 +420,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32( @@ -502,7 +502,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64( @@ -543,7 +543,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64( @@ -584,7 +584,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( @@ -174,7 +174,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( @@ -215,7 +215,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( @@ -338,7 +338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( @@ -379,7 +379,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( @@ -420,7 +420,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( @@ -502,7 +502,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( @@ -543,7 +543,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, 
v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( @@ -584,7 +584,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( @@ -174,7 +174,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( @@ -215,7 +215,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( @@ -338,7 +338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( @@ -379,7 +379,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( @@ -420,7 +420,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( @@ -502,7 +502,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( @@ -543,7 +543,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( @@ -584,7 +584,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vfncvt.x.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( @@ -174,7 +174,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vfncvt.x.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( @@ -215,7 +215,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vfncvt.x.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( @@ -338,7 +338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.x.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( @@ -379,7 +379,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vfncvt.x.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( @@ -420,7 +420,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.x.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( @@ -502,7 +502,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vfncvt.x.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( @@ -543,7 +543,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.x.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( @@ -584,7 +584,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.x.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vfncvt.x.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( @@ -174,7 +174,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vfncvt.x.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( @@ -215,7 +215,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vfncvt.x.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( @@ -338,7 +338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.x.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( @@ -379,7 +379,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: 
vfncvt.x.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( @@ -420,7 +420,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.x.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( @@ -502,7 +502,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vfncvt.x.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( @@ -543,7 +543,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.x.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( @@ -584,7 +584,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.x.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vfncvt.xu.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( @@ -174,7 +174,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vfncvt.xu.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( @@ -215,7 +215,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vfncvt.xu.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( @@ -338,7 +338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.xu.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( @@ -379,7 +379,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vfncvt.xu.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( @@ -420,7 +420,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.xu.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( @@ -502,7 +502,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vfncvt.xu.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( @@ -543,7 +543,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.xu.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64( @@ -584,7 +584,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.xu.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vfncvt.xu.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( @@ -174,7 +174,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vfncvt.xu.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( @@ -215,7 +215,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vfncvt.xu.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( @@ -338,7 +338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.xu.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( @@ -379,7 +379,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vfncvt.xu.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( @@ -420,7 +420,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.xu.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( @@ -502,7 +502,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vfncvt.xu.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( @@ -543,7 +543,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.xu.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64( @@ -584,7 +584,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.xu.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll @@ -369,7 +369,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -380,7 +380,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: 
vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -481,7 +481,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -492,7 +492,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -569,7 +569,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -580,7 +580,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e8,m4,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -883,7 +883,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -894,7 +894,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -995,7 +995,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1006,7 +1006,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1107,7 +1107,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1118,7 +1118,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1343,7 +1343,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1354,7 +1354,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1461,7 +1461,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1472,7 +1472,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec @@ -1579,7 +1579,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; 
CHECK-NEXT: ret %evec = fptosi %va to ret %evec @@ -1590,7 +1590,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.rtz.xu.f.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = fptoui %va to ret %evec diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-sdnode.ll @@ -48,14 +48,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; RV32-NEXT: vfncvt.f.f.w v25, v8 -; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: vmv.v.v v8, v25 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv4f32_nxv4f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; RV64-NEXT: vfncvt.f.f.w v25, v8 -; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: vmv.v.v v8, v25 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -67,14 +67,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; RV32-NEXT: vfncvt.f.f.w v26, v8 -; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: vmv.v.v v8, v26 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv8f32_nxv8f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; RV64-NEXT: vfncvt.f.f.w v26, v8 -; RV64-NEXT: vmv2r.v v8, v26 +; RV64-NEXT: vmv.v.v v8, v26 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -86,14 +86,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e16,m4,ta,mu ; RV32-NEXT: vfncvt.f.f.w v28, v8 -; RV32-NEXT: vmv4r.v v8, v28 +; RV32-NEXT: vmv.v.v v8, v28 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv16f32_nxv16f16: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e16,m4,ta,mu ; RV64-NEXT: vfncvt.f.f.w v28, v8 -; RV64-NEXT: vmv4r.v v8, v28 +; RV64-NEXT: vmv.v.v v8, v28 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -166,14 +166,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32,m1,ta,mu ; RV32-NEXT: vfncvt.f.f.w v25, v8 -; RV32-NEXT: vmv1r.v v8, v25 +; RV32-NEXT: vmv.v.v v8, v25 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv2f64_nxv2f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32,m1,ta,mu ; RV64-NEXT: vfncvt.f.f.w v25, v8 -; RV64-NEXT: vmv1r.v v8, v25 +; RV64-NEXT: vmv.v.v v8, v25 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -206,14 +206,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32,m2,ta,mu ; RV32-NEXT: vfncvt.f.f.w v26, v8 -; RV32-NEXT: vmv2r.v v8, v26 +; RV32-NEXT: vmv.v.v v8, v26 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv4f64_nxv4f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32,m2,ta,mu ; RV64-NEXT: vfncvt.f.f.w v26, v8 -; RV64-NEXT: vmv2r.v v8, v26 +; RV64-NEXT: vmv.v.v v8, v26 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec @@ -246,14 +246,14 @@ ; RV32: # %bb.0: ; RV32-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; RV32-NEXT: vfncvt.f.f.w v28, v8 -; RV32-NEXT: vmv4r.v v8, v28 +; RV32-NEXT: vmv.v.v v8, v28 ; RV32-NEXT: ret ; ; RV64-LABEL: vfptrunc_nxv8f64_nxv8f32: ; RV64: # %bb.0: ; RV64-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; RV64-NEXT: vfncvt.f.f.w v28, v8 -; RV64-NEXT: vmv4r.v v8, v28 +; RV64-NEXT: vmv.v.v v8, v28 ; RV64-NEXT: ret %evec = fptrunc %va to ret %evec diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll @@ -106,7 +106,7 @@ ; CHECK-NEXT: fmv.h.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vfslide1up.vf v25, v8, ft0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 
; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv4f16.f16( @@ -153,7 +153,7 @@ ; CHECK-NEXT: fmv.h.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vfslide1up.vf v26, v8, ft0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv8f16.f16( @@ -200,7 +200,7 @@ ; CHECK-NEXT: fmv.h.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vfslide1up.vf v28, v8, ft0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv16f16.f16( @@ -247,7 +247,7 @@ ; CHECK-NEXT: fmv.h.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu ; CHECK-NEXT: vfslide1up.vf v16, v8, ft0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv32f16.f16( @@ -341,7 +341,7 @@ ; CHECK-NEXT: fmv.w.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vfslide1up.vf v25, v8, ft0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv2f32.f32( @@ -388,7 +388,7 @@ ; CHECK-NEXT: fmv.w.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vfslide1up.vf v26, v8, ft0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv4f32.f32( @@ -435,7 +435,7 @@ ; CHECK-NEXT: fmv.w.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vfslide1up.vf v28, v8, ft0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv8f32.f32( @@ -482,7 +482,7 @@ ; CHECK-NEXT: fmv.w.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vfslide1up.vf v16, v8, ft0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv16f32.f32( @@ -532,7 +532,7 @@ ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vfslide1up.vf v25, v8, ft0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: @@ -587,7 +587,7 @@ ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vsetvli zero, a2, e64,m2,ta,mu ; CHECK-NEXT: vfslide1up.vf v26, v8, ft0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: @@ -642,7 +642,7 @@ ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vsetvli zero, a2, e64,m4,ta,mu ; CHECK-NEXT: vfslide1up.vf v28, v8, ft0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: @@ -697,7 +697,7 @@ ; CHECK-NEXT: fld ft0, 8(sp) ; CHECK-NEXT: vsetvli zero, a2, e64,m8,ta,mu ; CHECK-NEXT: vfslide1up.vf v16, v8, ft0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll @@ -106,7 +106,7 @@ ; CHECK-NEXT: fmv.h.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vfslide1up.vf v25, v8, ft0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv4f16.f16( @@ -153,7 +153,7 @@ ; CHECK-NEXT: fmv.h.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; 
CHECK-NEXT: vfslide1up.vf v26, v8, ft0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv8f16.f16( @@ -200,7 +200,7 @@ ; CHECK-NEXT: fmv.h.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vfslide1up.vf v28, v8, ft0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv16f16.f16( @@ -247,7 +247,7 @@ ; CHECK-NEXT: fmv.h.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu ; CHECK-NEXT: vfslide1up.vf v16, v8, ft0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv32f16.f16( @@ -341,7 +341,7 @@ ; CHECK-NEXT: fmv.w.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vfslide1up.vf v25, v8, ft0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv2f32.f32( @@ -388,7 +388,7 @@ ; CHECK-NEXT: fmv.w.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vfslide1up.vf v26, v8, ft0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv4f32.f32( @@ -435,7 +435,7 @@ ; CHECK-NEXT: fmv.w.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vfslide1up.vf v28, v8, ft0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv8f32.f32( @@ -482,7 +482,7 @@ ; CHECK-NEXT: fmv.w.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vfslide1up.vf v16, v8, ft0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv16f32.f32( @@ -529,7 +529,7 @@ ; CHECK-NEXT: fmv.d.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vfslide1up.vf v25, v8, ft0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv1f64.f64( @@ -576,7 +576,7 @@ ; CHECK-NEXT: fmv.d.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vfslide1up.vf v26, v8, ft0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv2f64.f64( @@ -623,7 +623,7 @@ ; CHECK-NEXT: fmv.d.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vfslide1up.vf v28, v8, ft0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv4f64.f64( @@ -670,7 +670,7 @@ ; CHECK-NEXT: fmv.d.x ft0, a0 ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vfslide1up.vf v16, v8, ft0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vfslide1up.nxv8f64.f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll @@ -1145,7 +1145,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.f.x.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1156,7 +1156,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu ; CHECK-NEXT: vfncvt.f.xu.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1209,7 
+1209,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vfncvt.f.x.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1220,7 +1220,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu ; CHECK-NEXT: vfncvt.f.xu.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1273,7 +1273,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.f.x.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1284,7 +1284,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu ; CHECK-NEXT: vfncvt.f.xu.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1405,7 +1405,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu ; CHECK-NEXT: vfncvt.f.x.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1416,7 +1416,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu ; CHECK-NEXT: vfncvt.f.xu.w v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1471,7 +1471,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.f.x.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1482,7 +1482,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu ; CHECK-NEXT: vfncvt.f.xu.w v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec @@ -1537,7 +1537,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.f.x.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = sitofp %va to ret %evec @@ -1548,7 +1548,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu ; CHECK-NEXT: vfncvt.f.xu.w v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret %evec = uitofp %va to ret %evec diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll @@ -146,7 +146,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i64( @@ -281,7 +281,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i64( @@ -326,7 +326,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i64( @@ -416,7 +416,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vloxei.nxv2i32.nxv2i64( @@ -461,7 +461,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i64( @@ -506,7 +506,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxei64.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i64( @@ -817,7 +817,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i64( @@ -862,7 +862,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i64( @@ -952,7 +952,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i64( @@ -997,7 +997,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i64( @@ -1042,7 +1042,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxei64.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i64( @@ -1398,7 +1398,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i32( @@ -1443,7 +1443,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i32( @@ -1578,7 +1578,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i32( @@ -1623,7 +1623,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i32( @@ -1668,7 +1668,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vloxei32.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i32( @@ -1933,7 +1933,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i32( @@ -1978,7 +1978,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i32( @@ -2023,7 +2023,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxei32.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i32( @@ -2068,7 +2068,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vloxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i32( @@ -2203,7 +2203,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i32( @@ -2248,7 +2248,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i32( @@ -2293,7 +2293,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vloxei32.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i32( @@ -2558,7 +2558,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i32( @@ -2603,7 +2603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i32( @@ -2648,7 +2648,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxei32.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i32( @@ -2693,7 +2693,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vloxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i32( @@ -2873,7 +2873,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxei16.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i16( @@ -2918,7 +2918,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i16( @@ -2963,7 +2963,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vloxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32i8.nxv32i16( @@ -3317,7 +3317,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxei16.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; 
CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i16( @@ -3362,7 +3362,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i16( @@ -3407,7 +3407,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i16( @@ -3452,7 +3452,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vloxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i16( @@ -3497,7 +3497,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxei16.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i16( @@ -3542,7 +3542,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i16( @@ -3587,7 +3587,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i16( @@ -3632,7 +3632,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vloxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i16( @@ -3986,7 +3986,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxei16.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i16( @@ -4031,7 +4031,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i16( @@ -4076,7 +4076,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i16( @@ -4121,7 +4121,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vloxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i16( @@ -4166,7 +4166,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxei16.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i16( @@ -4211,7 +4211,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vloxei.nxv2f64.nxv2i16( @@ -4256,7 +4256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i16( @@ -4301,7 +4301,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vloxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i16( @@ -4744,7 +4744,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i8( @@ -4789,7 +4789,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i8( @@ -4834,7 +4834,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vloxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i8( @@ -4879,7 +4879,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32i16.nxv32i8( @@ -4969,7 +4969,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i8( @@ -5014,7 +5014,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i8( @@ -5059,7 +5059,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i8( @@ -5104,7 +5104,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i8( @@ -5149,7 +5149,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i8( @@ -5194,7 +5194,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i8( @@ -5239,7 +5239,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i8( @@ -5284,7 +5284,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a1, e64,m8,ta,mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i8( @@ -5419,7 +5419,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i8( @@ -5464,7 +5464,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i8( @@ -5509,7 +5509,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vloxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i8( @@ -5554,7 +5554,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32f16.nxv32i8( @@ -5644,7 +5644,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i8( @@ -5689,7 +5689,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i8( @@ -5734,7 +5734,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i8( @@ -5779,7 +5779,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i8( @@ -5824,7 +5824,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i8( @@ -5869,7 +5869,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i8( @@ -5914,7 +5914,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i8( @@ -5959,7 +5959,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i8( diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll @@ -146,7 +146,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i64( @@ -281,7 +281,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i64( @@ -326,7 +326,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i64( @@ -416,7 +416,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i64( @@ -461,7 +461,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i64( @@ -506,7 +506,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxei64.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i64( @@ -817,7 +817,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i64( @@ -862,7 +862,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i64( @@ -952,7 +952,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i64( @@ -997,7 +997,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i64( @@ -1042,7 +1042,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxei64.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i64( @@ -1398,7 +1398,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i32( @@ -1443,7 +1443,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i32( @@ -1578,7 +1578,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i32( @@ -1623,7 +1623,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i32( @@ -1668,7 +1668,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vloxei32.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i32( @@ -1933,7 +1933,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i32( @@ -1978,7 +1978,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i32( @@ -2023,7 +2023,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxei32.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i32( @@ -2068,7 +2068,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vloxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i32( @@ -2203,7 +2203,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i32( @@ -2248,7 +2248,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i32( @@ -2293,7 +2293,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vloxei32.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i32( @@ -2558,7 +2558,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i32( @@ -2603,7 +2603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i32( @@ -2648,7 +2648,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxei32.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i32( @@ -2693,7 +2693,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vloxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 
+; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i32( @@ -2873,7 +2873,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxei16.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i16( @@ -2918,7 +2918,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i8.nxv16i16( @@ -2963,7 +2963,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vloxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32i8.nxv32i16( @@ -3317,7 +3317,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxei16.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i16( @@ -3362,7 +3362,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i16( @@ -3407,7 +3407,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i16( @@ -3452,7 +3452,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vloxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i16( @@ -3497,7 +3497,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxei16.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i16( @@ -3542,7 +3542,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i16( @@ -3587,7 +3587,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i16( @@ -3632,7 +3632,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vloxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i16( @@ -3986,7 +3986,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxei16.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i16( @@ -4031,7 +4031,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vloxei.nxv4f32.nxv4i16( @@ -4076,7 +4076,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i16( @@ -4121,7 +4121,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vloxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i16( @@ -4166,7 +4166,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxei16.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i16( @@ -4211,7 +4211,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i16( @@ -4256,7 +4256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i16( @@ -4301,7 +4301,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vloxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i16( @@ -4744,7 +4744,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i8( @@ -4789,7 +4789,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i8( @@ -4834,7 +4834,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vloxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i16.nxv16i8( @@ -4879,7 +4879,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32i16.nxv32i8( @@ -4969,7 +4969,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i8( @@ -5014,7 +5014,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i8( @@ -5059,7 +5059,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i8( @@ -5104,7 +5104,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16i32.nxv16i8( @@ -5149,7 +5149,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i8( @@ -5194,7 +5194,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i8( @@ -5239,7 +5239,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i8( @@ -5284,7 +5284,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i8( @@ -5419,7 +5419,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i8( @@ -5464,7 +5464,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i8( @@ -5509,7 +5509,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vloxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f16.nxv16i8( @@ -5554,7 +5554,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv32f16.nxv32i8( @@ -5644,7 +5644,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i8( @@ -5689,7 +5689,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i8( @@ -5734,7 +5734,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i8( @@ -5779,7 +5779,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv16f32.nxv16i8( @@ -5824,7 +5824,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: 
vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i8( @@ -5869,7 +5869,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i8( @@ -5914,7 +5914,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i8( @@ -5959,7 +5959,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vloxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i8( diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll @@ -10,7 +10,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16* %base, %index, i32 %vl) @@ -40,7 +40,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(i16* %base, %index, i32 %vl) @@ -70,7 +70,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(i16* %base, %index, i32 %vl) @@ -811,7 +811,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) @@ -841,7 +841,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) @@ -871,7 +871,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) @@ -901,7 +901,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) @@ -932,7 +932,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) @@ -964,7 +964,7 @@ ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) @@ -995,7 +995,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) @@ -1028,7 +1028,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) @@ -1061,7 +1061,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) @@ -1093,7 +1093,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1123,7 +1123,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1153,7 +1153,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1183,7 +1183,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1215,7 +1215,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1247,7 +1247,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1279,7 +1279,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1312,7 +1312,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call 
{,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1345,7 +1345,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1378,7 +1378,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1412,7 +1412,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1446,7 +1446,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1480,7 +1480,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1515,7 +1515,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1550,7 +1550,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1585,7 +1585,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1621,7 +1621,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1657,7 +1657,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1693,7 +1693,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1730,7 +1730,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1767,7 +1767,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1804,7 +1804,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -1834,7 +1834,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -1864,7 +1864,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -1894,7 +1894,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -1926,7 +1926,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -1958,7 +1958,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -1989,7 +1989,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2022,7 +2022,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2055,7 +2055,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2088,7 +2088,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} 
@llvm.riscv.vloxseg5.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2122,7 +2122,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2156,7 +2156,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2190,7 +2190,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2225,7 +2225,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2260,7 +2260,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2295,7 +2295,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2331,7 +2331,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2367,7 +2367,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2403,7 +2403,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2440,7 +2440,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2477,7 +2477,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -3225,7 +3225,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) @@ -3255,7 +3255,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) @@ -3285,7 +3285,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) @@ -3315,7 +3315,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) @@ -3347,7 +3347,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) @@ -3379,7 +3379,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) @@ -3410,7 +3410,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) @@ -3443,7 +3443,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) @@ -3476,7 +3476,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) @@ -3509,7 +3509,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3539,7 +3539,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3569,7 +3569,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3599,7 +3599,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3630,7 +3630,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3662,7 +3662,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3693,7 +3693,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3726,7 +3726,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3759,7 +3759,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3791,7 +3791,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3825,7 +3825,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3859,7 +3859,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3892,7 +3892,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3927,7 +3927,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3962,7 +3962,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3997,7 +3997,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -4033,7 +4033,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -4069,7 +4069,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -4105,7 +4105,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -4142,7 +4142,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -4179,7 +4179,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -4216,7 +4216,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(i32* %base, %index, i32 %vl) @@ -4246,7 +4246,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(i32* %base, %index, i32 %vl) @@ -4276,7 +4276,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(i32* %base, %index, i32 %vl) @@ -5727,7 +5727,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(i8* %base, %index, i32 %vl) @@ -5757,7 +5757,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(i8* %base, %index, i32 %vl) @@ -7209,7 
+7209,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl) @@ -7239,7 +7239,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl) @@ -7269,7 +7269,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl) @@ -7299,7 +7299,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl) @@ -7331,7 +7331,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl) @@ -7363,7 +7363,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl) @@ -7395,7 +7395,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl) @@ -7428,7 +7428,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl) @@ -7461,7 +7461,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl) @@ -7494,7 +7494,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16(half* %base, %index, i32 %vl) @@ -7524,7 +7524,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8(half* %base, %index, i32 %vl) @@ -7554,7 +7554,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, 
v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(half* %base, %index, i32 %vl) @@ -7584,7 +7584,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(double* %base, %index, i32 %vl) @@ -7614,7 +7614,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8(double* %base, %index, i32 %vl) @@ -7644,7 +7644,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(double* %base, %index, i32 %vl) @@ -7674,7 +7674,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -7704,7 +7704,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -7734,7 +7734,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -7764,7 +7764,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -7796,7 +7796,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -7828,7 +7828,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -7860,7 +7860,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -7893,7 +7893,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -7926,7 +7926,7 @@ ; CHECK: # %bb.0: 
# %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -7959,7 +7959,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -7993,7 +7993,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8027,7 +8027,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8061,7 +8061,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -8096,7 +8096,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8131,7 +8131,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8166,7 +8166,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -8202,7 +8202,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8238,7 +8238,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8274,7 +8274,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -8311,7 +8311,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v 
v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8348,7 +8348,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8385,7 +8385,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8415,7 +8415,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8445,7 +8445,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8475,7 +8475,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8507,7 +8507,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8539,7 +8539,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8571,7 +8571,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8604,7 +8604,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8637,7 +8637,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8670,7 +8670,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(float* %base, %index, i32 
%vl) @@ -8704,7 +8704,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8738,7 +8738,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8772,7 +8772,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8807,7 +8807,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8842,7 +8842,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8877,7 +8877,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8913,7 +8913,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8949,7 +8949,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8985,7 +8985,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -9022,7 +9022,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -9059,7 +9059,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -10518,7 +10518,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: 
vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) @@ -10548,7 +10548,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) @@ -10578,7 +10578,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) @@ -10608,7 +10608,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) @@ -10640,7 +10640,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) @@ -10672,7 +10672,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) @@ -10703,7 +10703,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) @@ -10736,7 +10736,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) @@ -10769,7 +10769,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) @@ -10802,7 +10802,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(float* %base, %index, i32 %vl) @@ -10832,7 +10832,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(float* %base, %index, i32 %vl) @@ -10862,7 +10862,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv8f32.nxv8i32(float* %base, %index, i32 %vl) @@ -10892,7 +10892,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) @@ -10922,7 +10922,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) @@ -10952,7 +10952,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) @@ -10982,7 +10982,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) @@ -11014,7 +11014,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) @@ -11046,7 +11046,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) @@ -11078,7 +11078,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) @@ -11111,7 +11111,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) @@ -11144,7 +11144,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) @@ -11177,7 +11177,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11207,7 +11207,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11237,7 +11237,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11267,7 +11267,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11299,7 +11299,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11331,7 +11331,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11362,7 +11362,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11395,7 +11395,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11428,7 +11428,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11461,7 +11461,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11495,7 +11495,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11529,7 +11529,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11563,7 +11563,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11598,7 +11598,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11633,7 +11633,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11668,7 +11668,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11704,7 +11704,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11740,7 +11740,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11776,7 +11776,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11813,7 +11813,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11850,7 +11850,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -12598,7 +12598,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) @@ -12628,7 +12628,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8(float* %base, %index, i32 %vl) @@ -12658,7 +12658,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(float* %base, %index, i32 %vl) @@ -12688,7 +12688,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) @@ 
-12720,7 +12720,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv.v.v v8, v2
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(float* %base, %index, i32 %vl)
@@ -12752,7 +12752,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv.v.v v8, v2
; CHECK-NEXT: ret
entry:
%0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(float* %base, %index, i32 %vl)
@@ -12784,7 +12784,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv.v.v v8, v2
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16(float* %base, %index, i32 %vl)
@@ -12817,7 +12817,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv.v.v v8, v2
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8(float* %base, %index, i32 %vl)
@@ -12850,7 +12850,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv.v.v v8, v2
; CHECK-NEXT: ret
entry:
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(float* %base, %index, i32 %vl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll
@@ -10,7 +10,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu
; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vmv.v.v v8, v4
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i16(i16* %base, %index, i64 %vl)
@@ -40,7 +40,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu
; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vmv.v.v v8, v4
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i8(i16* %base, %index, i64 %vl)
@@ -70,7 +70,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu
; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vmv.v.v v8, v4
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i16.nxv16i32(i16* %base, %index, i64 %vl)
@@ -100,7 +100,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv.v.v v8, v2
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl)
@@ -130,7 +130,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv.v.v v8, v2
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl)
@@ -160,7 +160,7 @@
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8
-; CHECK-NEXT: vmv2r.v v8, v2
+; CHECK-NEXT: vmv.v.v v8, v2
; CHECK-NEXT: ret
entry:
%0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i64(i32*
%base, %index, i64 %vl) @@ -190,7 +190,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) @@ -220,7 +220,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) @@ -252,7 +252,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) @@ -284,7 +284,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) @@ -315,7 +315,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) @@ -347,7 +347,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) @@ -380,7 +380,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) @@ -413,7 +413,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) @@ -446,7 +446,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) @@ -479,7 +479,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) @@ -509,7 +509,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) @@ -539,7 +539,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: 
vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) @@ -569,7 +569,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) @@ -600,7 +600,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) @@ -632,7 +632,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) @@ -663,7 +663,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) @@ -696,7 +696,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) @@ -729,7 +729,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) @@ -761,7 +761,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -791,7 +791,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -821,7 +821,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -851,7 +851,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -881,7 +881,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -913,7 +913,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, 
e64,m1,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -945,7 +945,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -977,7 +977,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1009,7 +1009,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1042,7 +1042,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1075,7 +1075,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1108,7 +1108,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1141,7 +1141,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1175,7 +1175,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1209,7 +1209,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1243,7 +1243,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1277,7 +1277,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} 
@llvm.riscv.vloxseg6.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1312,7 +1312,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1347,7 +1347,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1382,7 +1382,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1417,7 +1417,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1453,7 +1453,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1489,7 +1489,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1525,7 +1525,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1561,7 +1561,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1598,7 +1598,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1635,7 +1635,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1672,7 +1672,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -2657,7 +2657,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) @@ -2687,7 +2687,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) @@ -2717,7 +2717,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) @@ -2747,7 +2747,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) @@ -2777,7 +2777,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) @@ -2809,7 +2809,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) @@ -2841,7 +2841,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) @@ -2872,7 +2872,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) @@ -2903,7 +2903,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) @@ -2936,7 +2936,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) @@ -2969,7 +2969,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) @@ -3001,7 +3001,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} 
@llvm.riscv.vloxseg4.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) @@ -4926,7 +4926,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -4956,7 +4956,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -4986,7 +4986,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5016,7 +5016,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5046,7 +5046,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5078,7 +5078,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5110,7 +5110,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5142,7 +5142,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5173,7 +5173,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5206,7 +5206,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5239,7 +5239,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5272,7 +5272,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: 
vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5305,7 +5305,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5339,7 +5339,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5373,7 +5373,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5407,7 +5407,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5441,7 +5441,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5476,7 +5476,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5511,7 +5511,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5546,7 +5546,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5581,7 +5581,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5617,7 +5617,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5653,7 +5653,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} 
@llvm.riscv.vloxseg7.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5689,7 +5689,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5725,7 +5725,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5762,7 +5762,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5799,7 +5799,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5836,7 +5836,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5873,7 +5873,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -5903,7 +5903,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -5933,7 +5933,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -5963,7 +5963,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -5993,7 +5993,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6024,7 +6024,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6056,7 +6056,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: 
vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6087,7 +6087,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6118,7 +6118,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6151,7 +6151,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6184,7 +6184,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6216,7 +6216,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6248,7 +6248,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6282,7 +6282,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6316,7 +6316,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6349,7 +6349,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6382,7 +6382,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6417,7 +6417,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) 
@@ -6452,7 +6452,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6486,7 +6486,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6521,7 +6521,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6557,7 +6557,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6593,7 +6593,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6628,7 +6628,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6664,7 +6664,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6701,7 +6701,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6738,7 +6738,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6774,7 +6774,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6811,7 +6811,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i32(i64* %base, %index, i64 %vl) @@ -6841,7 +6841,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; 
CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i8(i64* %base, %index, i64 %vl) @@ -6871,7 +6871,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i64(i64* %base, %index, i64 %vl) @@ -6901,7 +6901,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i64.nxv4i16(i64* %base, %index, i64 %vl) @@ -6931,7 +6931,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -6961,7 +6961,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -6991,7 +6991,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7021,7 +7021,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7051,7 +7051,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7082,7 +7082,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7114,7 +7114,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7145,7 +7145,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7177,7 +7177,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7210,7 +7210,7 @@ ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7243,7 +7243,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7275,7 +7275,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7308,7 +7308,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7342,7 +7342,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7376,7 +7376,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7409,7 +7409,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7443,7 +7443,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7478,7 +7478,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7513,7 +7513,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7548,7 +7548,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7583,7 +7583,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: 
ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7619,7 +7619,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7655,7 +7655,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7691,7 +7691,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7727,7 +7727,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7764,7 +7764,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7801,7 +7801,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7838,7 +7838,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -9770,7 +9770,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i16(i32* %base, %index, i64 %vl) @@ -9800,7 +9800,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i8(i32* %base, %index, i64 %vl) @@ -9830,7 +9830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i64(i32* %base, %index, i64 %vl) @@ -9860,7 +9860,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8i32.nxv8i32(i32* %base, %index, i64 %vl) @@ -9890,7 +9890,7 @@ ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i16(i8* %base, %index, i64 %vl) @@ -9920,7 +9920,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv32i8.nxv32i8(i8* %base, %index, i64 %vl) @@ -10897,7 +10897,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) @@ -10927,7 +10927,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) @@ -10957,7 +10957,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) @@ -10987,7 +10987,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) @@ -11017,7 +11017,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) @@ -11049,7 +11049,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) @@ -11081,7 +11081,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) @@ -11113,7 +11113,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) @@ -11145,7 +11145,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) @@ -11178,7 +11178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: 
%0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) @@ -11211,7 +11211,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) @@ -11244,7 +11244,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) @@ -11277,7 +11277,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i16(half* %base, %index, i64 %vl) @@ -11307,7 +11307,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i8(half* %base, %index, i64 %vl) @@ -11337,7 +11337,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv16f16.nxv16i32(half* %base, %index, i64 %vl) @@ -11367,7 +11367,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i32(double* %base, %index, i64 %vl) @@ -11397,7 +11397,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i8(double* %base, %index, i64 %vl) @@ -11427,7 +11427,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i64(double* %base, %index, i64 %vl) @@ -11457,7 +11457,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f64.nxv4i16(double* %base, %index, i64 %vl) @@ -11487,7 +11487,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11517,7 +11517,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11547,7 +11547,7 @@ ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11577,7 +11577,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11607,7 +11607,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11639,7 +11639,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11671,7 +11671,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11703,7 +11703,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11735,7 +11735,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11768,7 +11768,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11801,7 +11801,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11834,7 +11834,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11867,7 +11867,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11901,7 +11901,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11935,7 +11935,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11969,7 +11969,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12003,7 +12003,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -12038,7 +12038,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -12073,7 +12073,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -12108,7 +12108,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12143,7 +12143,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -12179,7 +12179,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -12215,7 +12215,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -12251,7 +12251,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12287,7 +12287,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} 
@llvm.riscv.vloxseg8.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -12324,7 +12324,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -12361,7 +12361,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -12398,7 +12398,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12435,7 +12435,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12465,7 +12465,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12495,7 +12495,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12525,7 +12525,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12555,7 +12555,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12587,7 +12587,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12619,7 +12619,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12651,7 +12651,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12682,7 +12682,7 @@ ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12715,7 +12715,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12748,7 +12748,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12781,7 +12781,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12814,7 +12814,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12848,7 +12848,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12882,7 +12882,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12916,7 +12916,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12950,7 +12950,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12985,7 +12985,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -13020,7 +13020,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -13055,7 +13055,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -13090,7 +13090,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -13126,7 +13126,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -13162,7 +13162,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -13198,7 +13198,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -13234,7 +13234,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -13271,7 +13271,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -13308,7 +13308,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -13345,7 +13345,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -15278,7 +15278,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) @@ -15308,7 +15308,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) @@ -15338,7 +15338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vloxseg2.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) @@ -15368,7 +15368,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) @@ -15398,7 +15398,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) @@ -15430,7 +15430,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) @@ -15462,7 +15462,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) @@ -15493,7 +15493,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) @@ -15524,7 +15524,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) @@ -15557,7 +15557,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) @@ -15590,7 +15590,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) @@ -15622,7 +15622,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) @@ -15655,7 +15655,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i16(float* %base, %index, i64 %vl) @@ -15685,7 +15685,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i8(float* %base, %index, i64 %vl) @@ -15715,7 +15715,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a1, e32,m4,ta,mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i64(float* %base, %index, i64 %vl) @@ -15745,7 +15745,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv8f32.nxv8i32(float* %base, %index, i64 %vl) @@ -15775,7 +15775,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) @@ -15805,7 +15805,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) @@ -15835,7 +15835,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) @@ -15865,7 +15865,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) @@ -15895,7 +15895,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) @@ -15927,7 +15927,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) @@ -15959,7 +15959,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) @@ -15991,7 +15991,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) @@ -16023,7 +16023,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) @@ -16056,7 +16056,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: 
ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) @@ -16089,7 +16089,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) @@ -16122,7 +16122,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) @@ -16155,7 +16155,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16185,7 +16185,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16215,7 +16215,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16245,7 +16245,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16275,7 +16275,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16306,7 +16306,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16338,7 +16338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16369,7 +16369,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16401,7 +16401,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16434,7 +16434,7 @@ ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16467,7 +16467,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16499,7 +16499,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16532,7 +16532,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16566,7 +16566,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16600,7 +16600,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16633,7 +16633,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vloxseg5.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16667,7 +16667,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16702,7 +16702,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16737,7 +16737,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16772,7 +16772,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vloxseg6.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16807,7 +16807,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16843,7 +16843,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16879,7 +16879,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16915,7 +16915,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vloxseg7.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16951,7 +16951,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16988,7 +16988,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -17025,7 +17025,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -17062,7 +17062,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vloxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vloxseg8.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -18046,7 +18046,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) @@ -18076,7 +18076,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) @@ -18106,7 +18106,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) @@ -18136,7 +18136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vloxseg2.nxv4f32.nxv4i16(float* 
%base, %index, i64 %vl) @@ -18166,7 +18166,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) @@ -18198,7 +18198,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) @@ -18230,7 +18230,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) @@ -18261,7 +18261,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vloxseg3.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) @@ -18293,7 +18293,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) @@ -18326,7 +18326,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) @@ -18359,7 +18359,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) @@ -18392,7 +18392,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vloxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vloxseg4.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll @@ -23,7 +23,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vlseg2e16.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu ; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -316,7 +316,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vlseg2e8.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu ; CHECK-NEXT: vlseg2e8.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -350,8 +350,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vlseg3e8.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v 
v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu ; CHECK-NEXT: vlseg3e8.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -385,9 +385,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vlseg4e8.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu ; CHECK-NEXT: vlseg4e8.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -421,7 +421,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg2e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -455,8 +455,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg3e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -490,9 +490,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg4e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -526,10 +526,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg5e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -563,11 +563,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg6e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -601,12 +601,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg7e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: 
vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -640,13 +640,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg8e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -680,7 +680,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -714,8 +714,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg3e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -749,9 +749,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg4e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -785,10 +785,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg5e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -822,11 +822,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg6e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -860,12 +860,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg7e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v 
v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -899,13 +899,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg8e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -1198,7 +1198,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vlseg2e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -1232,8 +1232,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vlseg3e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -1267,9 +1267,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vlseg4e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -1303,7 +1303,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vlseg2e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -1337,8 +1337,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vlseg3e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -1372,9 +1372,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vlseg4e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -1408,10 +1408,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vlseg5e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; 
CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -1445,11 +1445,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vlseg6e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -1483,12 +1483,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vlseg7e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -1522,13 +1522,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vlseg8e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -1562,7 +1562,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vlseg2e32.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu ; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -2114,7 +2114,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vlseg2e8.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu ; CHECK-NEXT: vlseg2e8.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -2666,7 +2666,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vlseg2e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -2700,8 +2700,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vlseg3e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: 
vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -2735,9 +2735,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vlseg4e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -2771,7 +2771,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vlseg2e16.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu ; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -2805,7 +2805,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vlseg2e64.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu ; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -2839,7 +2839,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg2e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -2873,8 +2873,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg3e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -2908,9 +2908,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg4e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -2944,10 +2944,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg5e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -2981,11 +2981,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg6e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: 
vlseg6e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -3019,12 +3019,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg7e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -3058,13 +3058,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg8e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -3098,7 +3098,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg2e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -3132,8 +3132,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg3e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -3167,9 +3167,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg4e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -3203,10 +3203,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg5e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -3240,11 +3240,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg6e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; 
CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -3278,12 +3278,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg7e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -3317,13 +3317,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg8e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -3875,7 +3875,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vlseg2e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -3909,8 +3909,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vlseg3e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -3944,9 +3944,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vlseg4e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -3980,7 +3980,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vlseg2e32.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu ; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -4014,7 +4014,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vlseg2e64.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -4048,8 
+4048,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vlseg3e64.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -4083,9 +4083,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vlseg4e64.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -4119,7 +4119,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -4153,8 +4153,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg3e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -4188,9 +4188,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg4e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -4224,10 +4224,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg5e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -4261,11 +4261,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg6e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -4299,12 +4299,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg7e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v 
v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -4338,13 +4338,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg8e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -4637,7 +4637,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vlseg2e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -4671,8 +4671,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vlseg3e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -4706,9 +4706,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vlseg4e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll @@ -23,7 +23,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vlseg2e16.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu ; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -57,7 +57,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vlseg2e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -91,8 +91,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vlseg3e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -126,9 +126,9 @@ ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vlseg4e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -162,7 +162,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vlseg2e8.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu ; CHECK-NEXT: vlseg2e8.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -196,8 +196,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vlseg3e8.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu ; CHECK-NEXT: vlseg3e8.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -231,9 +231,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vlseg4e8.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu ; CHECK-NEXT: vlseg4e8.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -267,7 +267,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg2e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -301,8 +301,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg3e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -336,9 +336,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg4e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -372,10 +372,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg5e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -409,11 +409,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg6e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: 
vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -447,12 +447,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg7e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -486,13 +486,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg8e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -785,7 +785,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vlseg2e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -819,8 +819,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vlseg3e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -854,9 +854,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vlseg4e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -1408,7 +1408,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg2e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -1442,8 +1442,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg3e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; 
CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -1477,9 +1477,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg4e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -1513,10 +1513,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg5e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -1550,11 +1550,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg6e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -1588,12 +1588,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg7e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -1627,13 +1627,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg8e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -1667,7 +1667,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vlseg2e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -1701,8 +1701,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, 
e8,m1,ta,mu ; CHECK-NEXT: vlseg3e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -1736,9 +1736,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vlseg4e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -1772,10 +1772,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vlseg5e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -1809,11 +1809,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vlseg6e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -1847,12 +1847,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vlseg7e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -1886,13 +1886,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vlseg8e8.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -1926,7 +1926,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vlseg2e64.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu ; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t ; CHECK-NEXT: 
# kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -1960,7 +1960,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -1994,8 +1994,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg3e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -2029,9 +2029,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg4e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -2065,10 +2065,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg5e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -2102,11 +2102,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg6e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -2140,12 +2140,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg7e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -2179,13 +2179,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg8e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; 
CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -2737,7 +2737,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vlseg2e32.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu ; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -2771,7 +2771,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vlseg2e8.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu ; CHECK-NEXT: vlseg2e8.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -3064,7 +3064,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vlseg2e64.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -3098,8 +3098,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vlseg3e64.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -3133,9 +3133,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vlseg4e64.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -3169,7 +3169,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vlseg2e16.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu ; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -3203,7 +3203,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vlseg2e64.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu ; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -3237,7 +3237,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg2e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -3271,8 +3271,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg3e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -3306,9 +3306,9 @@ ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg4e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -3342,10 +3342,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg5e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -3379,11 +3379,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg6e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -3417,12 +3417,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg7e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -3456,13 +3456,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vlseg8e64.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -3496,7 +3496,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg2e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -3530,8 +3530,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg3e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; 
CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -3565,9 +3565,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg4e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -3601,10 +3601,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg5e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -3638,11 +3638,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg6e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -3676,12 +3676,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg7e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -3715,13 +3715,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vlseg8e32.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -4273,7 +4273,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vlseg2e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -4307,8 +4307,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vlseg3e16.v v6, (a0) -; CHECK-NEXT: 
vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -4342,9 +4342,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vlseg4e16.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -4378,7 +4378,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vlseg2e32.v v4, (a0) -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu ; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -4412,7 +4412,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vlseg2e64.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -4446,8 +4446,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vlseg3e64.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -4481,9 +4481,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vlseg4e64.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -4517,7 +4517,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg2e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -4551,8 +4551,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg3e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -4586,9 +4586,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg4e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -4622,10 +4622,10 @@ ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg5e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -4659,11 +4659,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg6e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -4697,12 +4697,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg7e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -4736,13 +4736,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vlseg8e16.v v7, (a0) -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -5035,7 +5035,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vlseg2e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -5069,8 +5069,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vlseg3e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -5104,9 +5104,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vlseg4e32.v v6, (a0) -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, 
e32,m2,tu,mu ; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll @@ -23,7 +23,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m4,ta,mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -316,7 +316,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m2,ta,mu ; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu ; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -350,8 +350,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m2,ta,mu ; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu ; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -385,9 +385,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m2,ta,mu ; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu ; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -421,7 +421,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -455,8 +455,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -490,9 +490,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -526,10 +526,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; 
CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -563,11 +563,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -601,12 +601,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -640,13 +640,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -680,7 +680,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -714,8 +714,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -749,9 +749,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -785,10 +785,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 
-; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -822,11 +822,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -860,12 +860,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -899,13 +899,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -1198,7 +1198,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m2,ta,mu ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -1232,8 +1232,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m2,ta,mu ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -1267,9 +1267,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m2,ta,mu ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed 
$v6m2_v8m2_v10m2_v12m2 @@ -1303,7 +1303,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m1,ta,mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -1337,8 +1337,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m1,ta,mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -1372,9 +1372,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m1,ta,mu ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -1408,10 +1408,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m1,ta,mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -1445,11 +1445,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m1,ta,mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -1483,12 +1483,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m1,ta,mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -1522,13 +1522,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m1,ta,mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: 
vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -1562,7 +1562,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m4,ta,mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -2114,7 +2114,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m4,ta,mu ; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu ; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -2666,7 +2666,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m2,ta,mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -2700,8 +2700,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m2,ta,mu ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -2735,9 +2735,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m2,ta,mu ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -2771,7 +2771,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m4,ta,mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -2805,7 +2805,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m4,ta,mu ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -2839,7 +2839,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -2873,8 +2873,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t 
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -2908,9 +2908,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -2944,10 +2944,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -2981,11 +2981,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -3019,12 +3019,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -3058,13 +3058,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -3098,7 +3098,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -3132,8 +3132,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), 
a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -3167,9 +3167,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -3203,10 +3203,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -3240,11 +3240,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -3278,12 +3278,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -3317,13 +3317,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -3875,7 +3875,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m2,ta,mu ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: 
vlsseg2e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -3909,8 +3909,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m2,ta,mu ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -3944,9 +3944,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m2,ta,mu ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -3980,7 +3980,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m4,ta,mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -4014,7 +4014,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m2,ta,mu ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -4048,8 +4048,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m2,ta,mu ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -4083,9 +4083,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m2,ta,mu ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -4119,7 +4119,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -4153,8 +4153,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -4188,9 +4188,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; 
CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -4224,10 +4224,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -4261,11 +4261,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -4299,12 +4299,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -4338,13 +4338,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -4637,7 +4637,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m2,ta,mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -4671,8 +4671,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m2,ta,mu ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def 
$v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -4706,9 +4706,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m2,ta,mu ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll @@ -23,7 +23,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m4,ta,mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -57,7 +57,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m2,ta,mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -91,8 +91,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m2,ta,mu ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -126,9 +126,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m2,ta,mu ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -162,7 +162,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m2,ta,mu ; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu ; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -196,8 +196,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m2,ta,mu ; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu ; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -231,9 +231,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m2,ta,mu ; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e8,m2,tu,mu ; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 
@@ -267,7 +267,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -301,8 +301,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -336,9 +336,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -372,10 +372,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -409,11 +409,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -447,12 +447,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -486,13 +486,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 
+; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -785,7 +785,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m2,ta,mu ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -819,8 +819,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m2,ta,mu ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -854,9 +854,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m2,ta,mu ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -1408,7 +1408,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -1442,8 +1442,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -1477,9 +1477,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -1513,10 +1513,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -1550,11 +1550,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, 
v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -1588,12 +1588,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -1627,13 +1627,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -1667,7 +1667,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m1,ta,mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -1701,8 +1701,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m1,ta,mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -1736,9 +1736,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m1,ta,mu ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -1772,10 +1772,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m1,ta,mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -1809,11 +1809,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m1,ta,mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1 -; 
CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -1847,12 +1847,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m1,ta,mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -1886,13 +1886,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m1,ta,mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e8,m1,tu,mu ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -1926,7 +1926,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m4,ta,mu ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -1960,7 +1960,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -1994,8 +1994,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -2029,9 +2029,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -2065,10 +2065,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg5e16.v 
v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -2102,11 +2102,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -2140,12 +2140,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -2179,13 +2179,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -2737,7 +2737,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m4,ta,mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -2771,7 +2771,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e8,m4,ta,mu ; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e8,m4,tu,mu ; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -3064,7 +3064,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m2,ta,mu ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -3098,8 +3098,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, 
a2, e64,m2,ta,mu ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -3133,9 +3133,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m2,ta,mu ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -3169,7 +3169,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m4,ta,mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -3203,7 +3203,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m4,ta,mu ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4 @@ -3237,7 +3237,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -3271,8 +3271,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -3306,9 +3306,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -3342,10 +3342,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -3379,11 +3379,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; 
CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -3417,12 +3417,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -3456,13 +3456,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m1,ta,mu ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -3496,7 +3496,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -3530,8 +3530,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -3565,9 +3565,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -3601,10 +3601,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -3638,11 +3638,11 @@ ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -3676,12 +3676,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -3715,13 +3715,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m1,ta,mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e32,m1,tu,mu ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -4273,7 +4273,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m2,ta,mu ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -4307,8 +4307,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m2,ta,mu ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -4342,9 +4342,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m2,ta,mu ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e16,m2,tu,mu ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -4378,7 +4378,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m4,ta,mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: vsetvli zero, zero, e32,m4,tu,mu ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 
killed $v4m4_v8m4 @@ -4412,7 +4412,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m2,ta,mu ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -4446,8 +4446,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m2,ta,mu ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -4481,9 +4481,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e64,m2,ta,mu ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e64,m2,tu,mu ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 @@ -4517,7 +4517,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 +; CHECK-NEXT: vmv.v.v v8, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8 @@ -4551,8 +4551,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9 @@ -4586,9 +4586,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10 @@ -4622,10 +4622,10 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11 @@ -4659,11 +4659,11 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg6e16.v v7, (a0), 
a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12 @@ -4697,12 +4697,12 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13 @@ -4736,13 +4736,13 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e16,m1,ta,mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1 -; CHECK-NEXT: vmv1r.v v8, v7 -; CHECK-NEXT: vmv1r.v v9, v7 -; CHECK-NEXT: vmv1r.v v10, v7 -; CHECK-NEXT: vmv1r.v v11, v7 -; CHECK-NEXT: vmv1r.v v12, v7 -; CHECK-NEXT: vmv1r.v v13, v7 -; CHECK-NEXT: vmv1r.v v14, v7 +; CHECK-NEXT: vmv.v.v v8, v7 +; CHECK-NEXT: vmv.v.v v9, v7 +; CHECK-NEXT: vmv.v.v v10, v7 +; CHECK-NEXT: vmv.v.v v11, v7 +; CHECK-NEXT: vmv.v.v v12, v7 +; CHECK-NEXT: vmv.v.v v13, v7 +; CHECK-NEXT: vmv.v.v v14, v7 ; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14 @@ -5035,7 +5035,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m2,ta,mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 +; CHECK-NEXT: vmv.v.v v8, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2 @@ -5069,8 +5069,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m2,ta,mu ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2 @@ -5104,9 +5104,9 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a2, e32,m2,ta,mu ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1 -; CHECK-NEXT: vmv2r.v v8, v6 -; CHECK-NEXT: vmv2r.v v10, v6 -; CHECK-NEXT: vmv2r.v v12, v6 +; CHECK-NEXT: vmv.v.v v8, v6 +; CHECK-NEXT: vmv.v.v v10, v6 +; CHECK-NEXT: vmv.v.v v12, v6 ; CHECK-NEXT: vsetvli zero, zero, e32,m2,tu,mu ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t ; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2 diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll @@ -146,7 +146,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i64( @@ -281,7 +281,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i64( @@ -326,7 +326,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, 
e16,m2,ta,mu ; CHECK-NEXT: vluxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i64( @@ -416,7 +416,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i64( @@ -461,7 +461,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i64( @@ -506,7 +506,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxei64.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i64( @@ -817,7 +817,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i64( @@ -862,7 +862,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i64( @@ -952,7 +952,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i64( @@ -997,7 +997,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i64( @@ -1042,7 +1042,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxei64.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i64( @@ -1398,7 +1398,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i32( @@ -1443,7 +1443,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i32( @@ -1578,7 +1578,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i32( @@ -1623,7 +1623,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i32( @@ -1668,7 +1668,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vluxei32.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, 
v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i32( @@ -1933,7 +1933,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i32( @@ -1978,7 +1978,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i32( @@ -2023,7 +2023,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxei32.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i32( @@ -2068,7 +2068,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vluxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i32( @@ -2203,7 +2203,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i32( @@ -2248,7 +2248,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i32( @@ -2293,7 +2293,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vluxei32.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i32( @@ -2558,7 +2558,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i32( @@ -2603,7 +2603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i32( @@ -2648,7 +2648,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxei32.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i32( @@ -2693,7 +2693,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vluxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i32( @@ -2873,7 +2873,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxei16.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i16( @@ -2918,7 +2918,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i16( @@ -2963,7 +2963,7 
@@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vluxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32i8.nxv32i16( @@ -3317,7 +3317,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxei16.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i16( @@ -3362,7 +3362,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i16( @@ -3407,7 +3407,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i16( @@ -3452,7 +3452,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vluxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i16( @@ -3497,7 +3497,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxei16.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i16( @@ -3542,7 +3542,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i16( @@ -3587,7 +3587,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i16( @@ -3632,7 +3632,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vluxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i16( @@ -3986,7 +3986,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxei16.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i16( @@ -4031,7 +4031,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i16( @@ -4076,7 +4076,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i16( @@ -4121,7 +4121,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vluxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i16( @@ -4166,7 +4166,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: 
vluxei16.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i16( @@ -4211,7 +4211,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i16( @@ -4256,7 +4256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i16( @@ -4301,7 +4301,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vluxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i16( @@ -4744,7 +4744,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i8( @@ -4789,7 +4789,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i8( @@ -4834,7 +4834,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vluxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i8( @@ -4879,7 +4879,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32i16.nxv32i8( @@ -4969,7 +4969,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i8( @@ -5014,7 +5014,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i8( @@ -5059,7 +5059,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i8( @@ -5104,7 +5104,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i8( @@ -5149,7 +5149,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i8( @@ -5194,7 +5194,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret 
entry: %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i8( @@ -5239,7 +5239,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i8( @@ -5284,7 +5284,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i8( @@ -5419,7 +5419,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i8( @@ -5464,7 +5464,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i8( @@ -5509,7 +5509,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vluxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i8( @@ -5554,7 +5554,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32f16.nxv32i8( @@ -5644,7 +5644,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i8( @@ -5689,7 +5689,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i8( @@ -5734,7 +5734,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i8( @@ -5779,7 +5779,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i8( @@ -5824,7 +5824,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i8( @@ -5869,7 +5869,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i8( @@ -5914,7 +5914,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i8( @@ -5959,7 +5959,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i8( diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll @@ -146,7 +146,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i64( @@ -281,7 +281,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i64( @@ -326,7 +326,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i64( @@ -416,7 +416,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i64( @@ -461,7 +461,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i64( @@ -506,7 +506,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxei64.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i64( @@ -817,7 +817,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i64( @@ -862,7 +862,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i64( @@ -952,7 +952,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i64( @@ -997,7 +997,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i64( @@ -1042,7 +1042,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxei64.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i64( @@ -1398,7 +1398,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vluxei.nxv8i8.nxv8i32( @@ -1443,7 +1443,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i32( @@ -1578,7 +1578,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i32( @@ -1623,7 +1623,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i32( @@ -1668,7 +1668,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vluxei32.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i32( @@ -1933,7 +1933,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i32( @@ -1978,7 +1978,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i32( @@ -2023,7 +2023,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxei32.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i32( @@ -2068,7 +2068,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vluxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i32( @@ -2203,7 +2203,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i32( @@ -2248,7 +2248,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i32( @@ -2293,7 +2293,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vluxei32.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i32( @@ -2558,7 +2558,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxei32.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i32( @@ -2603,7 +2603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxei32.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i32( @@ -2648,7 +2648,7 @@ ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxei32.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i32( @@ -2693,7 +2693,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vluxei32.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i32( @@ -2873,7 +2873,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxei16.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i16( @@ -2918,7 +2918,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i8.nxv16i16( @@ -2963,7 +2963,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vluxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32i8.nxv32i16( @@ -3317,7 +3317,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxei16.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i16( @@ -3362,7 +3362,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i16( @@ -3407,7 +3407,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i16( @@ -3452,7 +3452,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vluxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i16( @@ -3497,7 +3497,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxei16.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i16( @@ -3542,7 +3542,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i16( @@ -3587,7 +3587,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i16( @@ -3632,7 +3632,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vluxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i16( @@ -3986,7 +3986,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxei16.v v25, (a0), v8 -; CHECK-NEXT: 
vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i16( @@ -4031,7 +4031,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i16( @@ -4076,7 +4076,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i16( @@ -4121,7 +4121,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vluxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i16( @@ -4166,7 +4166,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxei16.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i16( @@ -4211,7 +4211,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxei16.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i16( @@ -4256,7 +4256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxei16.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i16( @@ -4301,7 +4301,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vluxei16.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i16( @@ -4744,7 +4744,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i8( @@ -4789,7 +4789,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i8( @@ -4834,7 +4834,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vluxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i16.nxv16i8( @@ -4879,7 +4879,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32i16.nxv32i8( @@ -4969,7 +4969,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i8( @@ -5014,7 +5014,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vluxei.nxv4i32.nxv4i8( @@ -5059,7 +5059,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i8( @@ -5104,7 +5104,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16i32.nxv16i8( @@ -5149,7 +5149,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i8( @@ -5194,7 +5194,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i8( @@ -5239,7 +5239,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i8( @@ -5284,7 +5284,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i8( @@ -5419,7 +5419,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i8( @@ -5464,7 +5464,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i8( @@ -5509,7 +5509,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vluxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f16.nxv16i8( @@ -5554,7 +5554,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv32f16.nxv32i8( @@ -5644,7 +5644,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i8( @@ -5689,7 +5689,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i8( @@ -5734,7 +5734,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i8( @@ -5779,7 +5779,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, 
e32,m8,ta,mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv16f32.nxv16i8( @@ -5824,7 +5824,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxei8.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i8( @@ -5869,7 +5869,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxei8.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i8( @@ -5914,7 +5914,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxei8.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i8( @@ -5959,7 +5959,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vluxei8.v v16, (a0), v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i8( diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll @@ -10,7 +10,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16* %base, %index, i32 %vl) @@ -40,7 +40,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(i16* %base, %index, i32 %vl) @@ -70,7 +70,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(i16* %base, %index, i32 %vl) @@ -811,7 +811,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) @@ -841,7 +841,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) @@ -871,7 +871,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) @@ -901,7 +901,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(i8* %base, 
%index, i32 %vl) @@ -932,7 +932,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) @@ -964,7 +964,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) @@ -995,7 +995,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(i8* %base, %index, i32 %vl) @@ -1028,7 +1028,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(i8* %base, %index, i32 %vl) @@ -1061,7 +1061,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(i8* %base, %index, i32 %vl) @@ -1093,7 +1093,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1123,7 +1123,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1153,7 +1153,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1183,7 +1183,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1215,7 +1215,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1247,7 +1247,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1279,7 +1279,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1312,7 +1312,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1345,7 +1345,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1378,7 +1378,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1412,7 +1412,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1446,7 +1446,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1480,7 +1480,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1515,7 +1515,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1550,7 +1550,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1585,7 +1585,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1621,7 +1621,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1657,7 +1657,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1693,7 
+1693,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(i32* %base, %index, i32 %vl) @@ -1730,7 +1730,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(i32* %base, %index, i32 %vl) @@ -1767,7 +1767,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(i32* %base, %index, i32 %vl) @@ -1804,7 +1804,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -1834,7 +1834,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -1864,7 +1864,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -1894,7 +1894,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -1926,7 +1926,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -1958,7 +1958,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -1989,7 +1989,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2022,7 +2022,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2055,7 +2055,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: 
vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2088,7 +2088,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2122,7 +2122,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2156,7 +2156,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2190,7 +2190,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2225,7 +2225,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2260,7 +2260,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2295,7 +2295,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2331,7 +2331,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2367,7 +2367,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -2403,7 +2403,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(i16* %base, %index, i32 %vl) @@ -2440,7 +2440,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(i16* %base, %index, i32 %vl) @@ -2477,7 +2477,7 @@ 
; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(i16* %base, %index, i32 %vl) @@ -3225,7 +3225,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) @@ -3255,7 +3255,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) @@ -3285,7 +3285,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) @@ -3315,7 +3315,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) @@ -3347,7 +3347,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) @@ -3379,7 +3379,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) @@ -3410,7 +3410,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(i16* %base, %index, i32 %vl) @@ -3443,7 +3443,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(i16* %base, %index, i32 %vl) @@ -3476,7 +3476,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(i16* %base, %index, i32 %vl) @@ -3509,7 +3509,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3539,7 +3539,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; 
CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3569,7 +3569,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3599,7 +3599,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3630,7 +3630,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3662,7 +3662,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3693,7 +3693,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3726,7 +3726,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3759,7 +3759,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3791,7 +3791,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3825,7 +3825,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3859,7 +3859,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3892,7 +3892,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -3927,7 +3927,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; 
CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -3962,7 +3962,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -3997,7 +3997,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -4033,7 +4033,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -4069,7 +4069,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -4105,7 +4105,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(i8* %base, %index, i32 %vl) @@ -4142,7 +4142,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(i8* %base, %index, i32 %vl) @@ -4179,7 +4179,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(i8* %base, %index, i32 %vl) @@ -4216,7 +4216,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(i32* %base, %index, i32 %vl) @@ -4246,7 +4246,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(i32* %base, %index, i32 %vl) @@ -4276,7 +4276,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(i32* %base, %index, i32 %vl) @@ -5727,7 +5727,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vluxseg2.nxv32i8.nxv32i16(i8* %base, %index, i32 %vl) @@ -5757,7 +5757,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(i8* %base, %index, i32 %vl) @@ -7209,7 +7209,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl) @@ -7239,7 +7239,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl) @@ -7269,7 +7269,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl) @@ -7299,7 +7299,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl) @@ -7331,7 +7331,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl) @@ -7363,7 +7363,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl) @@ -7395,7 +7395,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16(i32* %base, %index, i32 %vl) @@ -7428,7 +7428,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8(i32* %base, %index, i32 %vl) @@ -7461,7 +7461,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32(i32* %base, %index, i32 %vl) @@ -7494,7 +7494,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16(half* %base, %index, i32 %vl) @@ -7524,7 +7524,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: 
vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8(half* %base, %index, i32 %vl) @@ -7554,7 +7554,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32(half* %base, %index, i32 %vl) @@ -7584,7 +7584,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16(double* %base, %index, i32 %vl) @@ -7614,7 +7614,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8(double* %base, %index, i32 %vl) @@ -7644,7 +7644,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32(double* %base, %index, i32 %vl) @@ -7674,7 +7674,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -7704,7 +7704,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -7734,7 +7734,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -7764,7 +7764,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -7796,7 +7796,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -7828,7 +7828,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -7860,7 +7860,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} 
@llvm.riscv.vluxseg4.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -7893,7 +7893,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -7926,7 +7926,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -7959,7 +7959,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -7993,7 +7993,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8027,7 +8027,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8061,7 +8061,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -8096,7 +8096,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8131,7 +8131,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8166,7 +8166,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -8202,7 +8202,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8238,7 +8238,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8274,7 +8274,7 @@ ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8(double* %base, %index, i32 %vl) @@ -8311,7 +8311,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32(double* %base, %index, i32 %vl) @@ -8348,7 +8348,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16(double* %base, %index, i32 %vl) @@ -8385,7 +8385,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8415,7 +8415,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8445,7 +8445,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8475,7 +8475,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8507,7 +8507,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8539,7 +8539,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8571,7 +8571,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8604,7 +8604,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8637,7 +8637,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8670,7 +8670,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8704,7 +8704,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8738,7 +8738,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8772,7 +8772,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8807,7 +8807,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8842,7 +8842,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8877,7 +8877,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -8913,7 +8913,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -8949,7 +8949,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -8985,7 +8985,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32(float* %base, %index, i32 %vl) @@ -9022,7 +9022,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8(float* %base, %index, i32 %vl) @@ -9059,7 
+9059,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16(float* %base, %index, i32 %vl) @@ -10518,7 +10518,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) @@ -10548,7 +10548,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) @@ -10578,7 +10578,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) @@ -10608,7 +10608,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) @@ -10640,7 +10640,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) @@ -10672,7 +10672,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) @@ -10703,7 +10703,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(half* %base, %index, i32 %vl) @@ -10736,7 +10736,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(half* %base, %index, i32 %vl) @@ -10769,7 +10769,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(half* %base, %index, i32 %vl) @@ -10802,7 +10802,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(float* %base, %index, i32 %vl) @@ -10832,7 +10832,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: 
vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(float* %base, %index, i32 %vl) @@ -10862,7 +10862,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(float* %base, %index, i32 %vl) @@ -10892,7 +10892,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) @@ -10922,7 +10922,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) @@ -10952,7 +10952,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) @@ -10982,7 +10982,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) @@ -11014,7 +11014,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) @@ -11046,7 +11046,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) @@ -11078,7 +11078,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(double* %base, %index, i32 %vl) @@ -11111,7 +11111,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(double* %base, %index, i32 %vl) @@ -11144,7 +11144,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(double* %base, %index, i32 %vl) @@ -11177,7 +11177,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(half* 
%base, %index, i32 %vl) @@ -11207,7 +11207,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11237,7 +11237,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11267,7 +11267,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11299,7 +11299,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11331,7 +11331,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11362,7 +11362,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11395,7 +11395,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11428,7 +11428,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11461,7 +11461,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11495,7 +11495,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11529,7 +11529,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11563,7 +11563,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: 
vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11598,7 +11598,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11633,7 +11633,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11668,7 +11668,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11704,7 +11704,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11740,7 +11740,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -11776,7 +11776,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(half* %base, %index, i32 %vl) @@ -11813,7 +11813,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(half* %base, %index, i32 %vl) @@ -11850,7 +11850,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(half* %base, %index, i32 %vl) @@ -12598,7 +12598,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) @@ -12628,7 +12628,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(float* %base, %index, i32 %vl) @@ -12658,7 +12658,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 
= tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(float* %base, %index, i32 %vl) @@ -12688,7 +12688,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) @@ -12720,7 +12720,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(float* %base, %index, i32 %vl) @@ -12752,7 +12752,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(float* %base, %index, i32 %vl) @@ -12784,7 +12784,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(float* %base, %index, i32 %vl) @@ -12817,7 +12817,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(float* %base, %index, i32 %vl) @@ -12850,7 +12850,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(float* %base, %index, i32 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll @@ -10,7 +10,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i16(i16* %base, %index, i64 %vl) @@ -40,7 +40,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i8(i16* %base, %index, i64 %vl) @@ -70,7 +70,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i16.nxv16i32(i16* %base, %index, i64 %vl) @@ -100,7 +100,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) @@ -130,7 +130,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v 
v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) @@ -160,7 +160,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) @@ -190,7 +190,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) @@ -220,7 +220,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) @@ -252,7 +252,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) @@ -284,7 +284,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) @@ -315,7 +315,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) @@ -347,7 +347,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i32(i32* %base, %index, i64 %vl) @@ -380,7 +380,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i8(i32* %base, %index, i64 %vl) @@ -413,7 +413,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i64(i32* %base, %index, i64 %vl) @@ -446,7 +446,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i32.nxv4i16(i32* %base, %index, i64 %vl) @@ -479,7 +479,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) @@ -509,7 +509,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, 
e8,m2,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) @@ -539,7 +539,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) @@ -569,7 +569,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) @@ -600,7 +600,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) @@ -632,7 +632,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) @@ -663,7 +663,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i16(i8* %base, %index, i64 %vl) @@ -696,7 +696,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i8(i8* %base, %index, i64 %vl) @@ -729,7 +729,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv16i8.nxv16i32(i8* %base, %index, i64 %vl) @@ -761,7 +761,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -791,7 +791,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -821,7 +821,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -851,7 +851,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1i64.nxv1i8(i64* %base, %index, 
i64 %vl) @@ -881,7 +881,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -913,7 +913,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -945,7 +945,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -977,7 +977,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1009,7 +1009,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1042,7 +1042,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1075,7 +1075,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1108,7 +1108,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1141,7 +1141,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1175,7 +1175,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1209,7 +1209,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1243,7 +1243,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1277,7 +1277,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1312,7 +1312,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1347,7 +1347,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1382,7 +1382,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1417,7 +1417,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1453,7 +1453,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1489,7 +1489,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) @@ -1525,7 +1525,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -1561,7 +1561,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i64(i64* %base, %index, i64 %vl) @@ -1598,7 +1598,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i32(i64* %base, %index, i64 %vl) @@ -1635,7 +1635,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i16(i64* %base, %index, i64 %vl) 
@@ -1672,7 +1672,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1i64.nxv1i8(i64* %base, %index, i64 %vl) @@ -2657,7 +2657,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) @@ -2687,7 +2687,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) @@ -2717,7 +2717,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) @@ -2747,7 +2747,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) @@ -2777,7 +2777,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) @@ -2809,7 +2809,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) @@ -2841,7 +2841,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) @@ -2872,7 +2872,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) @@ -2903,7 +2903,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i16(i16* %base, %index, i64 %vl) @@ -2936,7 +2936,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i8(i16* %base, %index, i64 %vl) @@ -2969,7 +2969,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: 
vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i64(i16* %base, %index, i64 %vl) @@ -3001,7 +3001,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i16.nxv8i32(i16* %base, %index, i64 %vl) @@ -4926,7 +4926,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -4956,7 +4956,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -4986,7 +4986,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5016,7 +5016,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5046,7 +5046,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5078,7 +5078,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5110,7 +5110,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5142,7 +5142,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5173,7 +5173,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5206,7 +5206,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5239,7 +5239,7 @@ ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5272,7 +5272,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5305,7 +5305,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5339,7 +5339,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5373,7 +5373,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5407,7 +5407,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5441,7 +5441,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5476,7 +5476,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5511,7 +5511,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5546,7 +5546,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5581,7 +5581,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5617,7 +5617,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; 
CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5653,7 +5653,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5689,7 +5689,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5725,7 +5725,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i32(i32* %base, %index, i64 %vl) @@ -5762,7 +5762,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i8(i32* %base, %index, i64 %vl) @@ -5799,7 +5799,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i16(i32* %base, %index, i64 %vl) @@ -5836,7 +5836,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2i32.nxv2i64(i32* %base, %index, i64 %vl) @@ -5873,7 +5873,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -5903,7 +5903,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -5933,7 +5933,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -5963,7 +5963,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -5993,7 +5993,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6024,7 +6024,7 @@ ; CHECK: # %bb.0: # %entry ; 
CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6056,7 +6056,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6087,7 +6087,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6118,7 +6118,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6151,7 +6151,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6184,7 +6184,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6216,7 +6216,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6248,7 +6248,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6282,7 +6282,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6316,7 +6316,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6349,7 +6349,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6382,7 +6382,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} 
@llvm.riscv.vluxseg6.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6417,7 +6417,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6452,7 +6452,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6486,7 +6486,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6521,7 +6521,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6557,7 +6557,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6593,7 +6593,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6628,7 +6628,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6664,7 +6664,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i16(i8* %base, %index, i64 %vl) @@ -6701,7 +6701,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i8(i8* %base, %index, i64 %vl) @@ -6738,7 +6738,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i64(i8* %base, %index, i64 %vl) @@ -6774,7 +6774,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv8i8.nxv8i32(i8* %base, %index, i64 %vl) @@ -6811,7 +6811,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: 
vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i32(i64* %base, %index, i64 %vl) @@ -6841,7 +6841,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i8(i64* %base, %index, i64 %vl) @@ -6871,7 +6871,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i64(i64* %base, %index, i64 %vl) @@ -6901,7 +6901,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i64.nxv4i16(i64* %base, %index, i64 %vl) @@ -6931,7 +6931,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -6961,7 +6961,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -6991,7 +6991,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7021,7 +7021,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7051,7 +7051,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7082,7 +7082,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7114,7 +7114,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7145,7 +7145,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4i16.nxv4i16(i16* %base, %index, 
i64 %vl) @@ -7177,7 +7177,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7210,7 +7210,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7243,7 +7243,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7275,7 +7275,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7308,7 +7308,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7342,7 +7342,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7376,7 +7376,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7409,7 +7409,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7443,7 +7443,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7478,7 +7478,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7513,7 +7513,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7548,7 +7548,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: 
vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7583,7 +7583,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7619,7 +7619,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7655,7 +7655,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7691,7 +7691,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -7727,7 +7727,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i32(i16* %base, %index, i64 %vl) @@ -7764,7 +7764,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i8(i16* %base, %index, i64 %vl) @@ -7801,7 +7801,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i64(i16* %base, %index, i64 %vl) @@ -7838,7 +7838,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4i16.nxv4i16(i16* %base, %index, i64 %vl) @@ -9770,7 +9770,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i16(i32* %base, %index, i64 %vl) @@ -9800,7 +9800,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i8(i32* %base, %index, i64 %vl) @@ -9830,7 +9830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i64(i32* %base, %index, 
i64 %vl) @@ -9860,7 +9860,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8i32.nxv8i32(i32* %base, %index, i64 %vl) @@ -9890,7 +9890,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i16(i8* %base, %index, i64 %vl) @@ -9920,7 +9920,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv32i8.nxv32i8(i8* %base, %index, i64 %vl) @@ -10897,7 +10897,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) @@ -10927,7 +10927,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) @@ -10957,7 +10957,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) @@ -10987,7 +10987,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) @@ -11017,7 +11017,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) @@ -11049,7 +11049,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) @@ -11081,7 +11081,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) @@ -11113,7 +11113,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) @@ -11145,7 +11145,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 
+; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i32(i64* %base, %index, i64 %vl) @@ -11178,7 +11178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i8(i64* %base, %index, i64 %vl) @@ -11211,7 +11211,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i16(i64* %base, %index, i64 %vl) @@ -11244,7 +11244,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2i64.nxv2i64(i64* %base, %index, i64 %vl) @@ -11277,7 +11277,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i16(half* %base, %index, i64 %vl) @@ -11307,7 +11307,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i8(half* %base, %index, i64 %vl) @@ -11337,7 +11337,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv16f16.nxv16i32(half* %base, %index, i64 %vl) @@ -11367,7 +11367,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i32(double* %base, %index, i64 %vl) @@ -11397,7 +11397,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i8(double* %base, %index, i64 %vl) @@ -11427,7 +11427,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i64(double* %base, %index, i64 %vl) @@ -11457,7 +11457,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f64.nxv4i16(double* %base, %index, i64 %vl) @@ -11487,7 +11487,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) 
@@ -11517,7 +11517,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11547,7 +11547,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11577,7 +11577,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11607,7 +11607,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11639,7 +11639,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11671,7 +11671,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11703,7 +11703,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11735,7 +11735,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11768,7 +11768,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11801,7 +11801,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11834,7 +11834,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -11867,7 +11867,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg5ei64.v 
v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -11901,7 +11901,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -11935,7 +11935,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -11969,7 +11969,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12003,7 +12003,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -12038,7 +12038,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -12073,7 +12073,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -12108,7 +12108,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12143,7 +12143,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -12179,7 +12179,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -12215,7 +12215,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -12251,7 +12251,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret 
entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12287,7 +12287,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i64(double* %base, %index, i64 %vl) @@ -12324,7 +12324,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i32(double* %base, %index, i64 %vl) @@ -12361,7 +12361,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i16(double* %base, %index, i64 %vl) @@ -12398,7 +12398,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv1f64.nxv1i8(double* %base, %index, i64 %vl) @@ -12435,7 +12435,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12465,7 +12465,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12495,7 +12495,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12525,7 +12525,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12555,7 +12555,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12587,7 +12587,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12619,7 +12619,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12651,7 +12651,7 
@@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12682,7 +12682,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12715,7 +12715,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12748,7 +12748,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12781,7 +12781,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12814,7 +12814,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12848,7 +12848,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -12882,7 +12882,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -12916,7 +12916,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -12950,7 +12950,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -12985,7 +12985,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -13020,7 +13020,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; 
CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -13055,7 +13055,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -13090,7 +13090,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -13126,7 +13126,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -13162,7 +13162,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -13198,7 +13198,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -13234,7 +13234,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i32(float* %base, %index, i64 %vl) @@ -13271,7 +13271,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i8(float* %base, %index, i64 %vl) @@ -13308,7 +13308,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i16(float* %base, %index, i64 %vl) @@ -13345,7 +13345,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv2f32.nxv2i64(float* %base, %index, i64 %vl) @@ -15278,7 +15278,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) @@ -15308,7 +15308,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call 
{,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) @@ -15338,7 +15338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) @@ -15368,7 +15368,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) @@ -15398,7 +15398,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) @@ -15430,7 +15430,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) @@ -15462,7 +15462,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) @@ -15493,7 +15493,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) @@ -15524,7 +15524,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i16(half* %base, %index, i64 %vl) @@ -15557,7 +15557,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i8(half* %base, %index, i64 %vl) @@ -15590,7 +15590,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i64(half* %base, %index, i64 %vl) @@ -15622,7 +15622,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv8f16.nxv8i32(half* %base, %index, i64 %vl) @@ -15655,7 +15655,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i16(float* %base, %index, i64 %vl) @@ -15685,7 +15685,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli 
zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i8(float* %base, %index, i64 %vl) @@ -15715,7 +15715,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i64(float* %base, %index, i64 %vl) @@ -15745,7 +15745,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v4 +; CHECK-NEXT: vmv.v.v v8, v4 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv8f32.nxv8i32(float* %base, %index, i64 %vl) @@ -15775,7 +15775,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) @@ -15805,7 +15805,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) @@ -15835,7 +15835,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) @@ -15865,7 +15865,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) @@ -15895,7 +15895,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) @@ -15927,7 +15927,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) @@ -15959,7 +15959,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) @@ -15991,7 +15991,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) @@ -16023,7 +16023,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: 
ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i32(double* %base, %index, i64 %vl) @@ -16056,7 +16056,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i8(double* %base, %index, i64 %vl) @@ -16089,7 +16089,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i16(double* %base, %index, i64 %vl) @@ -16122,7 +16122,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv2f64.nxv2i64(double* %base, %index, i64 %vl) @@ -16155,7 +16155,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16185,7 +16185,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16215,7 +16215,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16245,7 +16245,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16275,7 +16275,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16306,7 +16306,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16338,7 +16338,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16369,7 +16369,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16401,7 +16401,7 @@ ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16434,7 +16434,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16467,7 +16467,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16499,7 +16499,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16532,7 +16532,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg5ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16566,7 +16566,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg5ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16600,7 +16600,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg5ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16633,7 +16633,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg5ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,} @llvm.riscv.vluxseg5.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16667,7 +16667,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg6ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16702,7 +16702,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg6ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16737,7 +16737,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg6ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16772,7 +16772,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg6ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,} @llvm.riscv.vluxseg6.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16807,7 +16807,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg7ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16843,7 +16843,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg7ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -16879,7 +16879,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg7ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -16915,7 +16915,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg7ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,} @llvm.riscv.vluxseg7.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -16951,7 +16951,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg8ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i32(half* %base, %index, i64 %vl) @@ -16988,7 +16988,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg8ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i8(half* %base, %index, i64 %vl) @@ -17025,7 +17025,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg8ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i64(half* %base, %index, i64 %vl) @@ -17062,7 +17062,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vluxseg8ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vmv.v.v v8, v1 ; CHECK-NEXT: ret entry: %0 = tail call {,,,,,,,} @llvm.riscv.vluxseg8.nxv4f16.nxv4i16(half* %base, %index, i64 %vl) @@ -18046,7 +18046,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg2ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) @@ -18076,7 +18076,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg2ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) @@ -18106,7 +18106,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg2ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} 
@llvm.riscv.vluxseg2.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) @@ -18136,7 +18136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg2ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,} @llvm.riscv.vluxseg2.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) @@ -18166,7 +18166,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg3ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) @@ -18198,7 +18198,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg3ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) @@ -18230,7 +18230,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg3ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) @@ -18261,7 +18261,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg3ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,} @llvm.riscv.vluxseg3.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) @@ -18293,7 +18293,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg4ei32.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i32(float* %base, %index, i64 %vl) @@ -18326,7 +18326,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg4ei8.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i8(float* %base, %index, i64 %vl) @@ -18359,7 +18359,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg4ei64.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i64(float* %base, %index, i64 %vl) @@ -18392,7 +18392,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vluxseg4ei16.v v0, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vmv.v.v v8, v2 ; CHECK-NEXT: ret entry: %0 = tail call {,,,} @llvm.riscv.vluxseg4.nxv4f32.nxv4i16(float* %base, %index, i64 %vl) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll @@ -81,7 +81,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmadc.vvm v25, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8( @@ -219,7 +219,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vmadc.vvm v25, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, 
v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16( @@ -334,7 +334,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vmadc.vvm v25, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32( @@ -426,7 +426,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vmadc.vvm v25, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64( @@ -587,7 +587,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vmadc.vxm v25, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8( @@ -725,7 +725,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmadc.vxm v25, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16( @@ -840,7 +840,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmadc.vxm v25, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32( @@ -937,7 +937,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero ; CHECK-NEXT: vmadc.vvm v25, v8, v26, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: @@ -1093,7 +1093,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmadc.vim v25, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8( @@ -1195,7 +1195,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vmadc.vim v25, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16( @@ -1280,7 +1280,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vmadc.vim v25, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32( @@ -1348,7 +1348,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vmadc.vim v25, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll @@ -81,7 +81,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmadc.vvm v25, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8( @@ -219,7 +219,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vmadc.vvm v25, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16( @@ -334,7 +334,7 @@ ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vmadc.vvm v25, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32( @@ -426,7 +426,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vmadc.vvm v25, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64( @@ -587,7 +587,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vmadc.vxm v25, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8( @@ -725,7 +725,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmadc.vxm v25, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16( @@ -840,7 +840,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmadc.vxm v25, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32( @@ -932,7 +932,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vmadc.vxm v25, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv1i64.i64( @@ -1069,7 +1069,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmadc.vim v25, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv8i8.i8( @@ -1171,7 +1171,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vmadc.vim v25, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv4i16.i16( @@ -1256,7 +1256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vmadc.vim v25, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv2i32.i32( @@ -1324,7 +1324,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vmadc.vim v25, v8, 9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmadc.carry.in.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll @@ -81,7 +81,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmsbc.vvm v25, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8( @@ -219,7 +219,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vmsbc.vvm v25, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16( @@ -334,7 +334,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; 
CHECK-NEXT: vmsbc.vvm v25, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32( @@ -426,7 +426,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vmsbc.vvm v25, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64( @@ -587,7 +587,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vmsbc.vxm v25, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8( @@ -725,7 +725,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmsbc.vxm v25, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16( @@ -840,7 +840,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmsbc.vxm v25, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32( @@ -937,7 +937,7 @@ ; CHECK-NEXT: addi a0, sp, 8 ; CHECK-NEXT: vlse64.v v26, (a0), zero ; CHECK-NEXT: vmsbc.vvm v25, v8, v26, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll @@ -81,7 +81,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmsbc.vvm v25, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8( @@ -219,7 +219,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vmsbc.vvm v25, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16( @@ -334,7 +334,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vmsbc.vvm v25, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32( @@ -426,7 +426,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vmsbc.vvm v25, v8, v9, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64( @@ -587,7 +587,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vmsbc.vxm v25, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8( @@ -725,7 +725,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vmsbc.vxm v25, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16( @@ -840,7 +840,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vmsbc.vxm v25, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: 
vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32( @@ -932,7 +932,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vmsbc.vxm v25, v8, a0, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmsbf.m v25, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbf.nxv8i1( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmsbf.m v25, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsbf.nxv8i1( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmsif.m v25, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsif.nxv8i1( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmsif.m v25, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsif.nxv8i1( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmsof.m v25, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsof.nxv8i1( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll @@ -136,7 +136,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vmsof.m v25, v0 -; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: vmv.v.v v0, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vmsof.nxv8i1( diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir @@ -0,0 +1,49 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -verify-machineinstrs -mtriple riscv64 -run-pass=postrapseudos %s -o - | FileCheck %s + +... 
+---
+name: copy_whole_copy
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x14, $x16
+    ; CHECK-LABEL: name: copy_whole_copy
+    ; CHECK: liveins: $x14, $x16
+    ; CHECK: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype
+    ; CHECK: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype
+    ; CHECK: $v12m2 = PseudoVMV2R_V $v28m2
+    $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype
+    $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype
+    $v12m2 = COPY $v28m2
+...
+---
+name: copy_vmv_v_v
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x14, $x16
+    ; CHECK-LABEL: name: copy_vmv_v_v
+    ; CHECK: liveins: $x14, $x16
+    ; CHECK: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype
+    ; CHECK: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype
+    ; CHECK: $v12m4 = PseudoVMV_V_V_M4 $v28m4, $noreg, 5
+    $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype
+    $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype
+    $v12m4 = COPY $v28m4
+...
+---
+name: copy_vmv_v_i
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x14
+    ; CHECK-LABEL: name: copy_vmv_v_i
+    ; CHECK: liveins: $x14
+    ; CHECK: $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype
+    ; CHECK: $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5, implicit $vl, implicit $vtype
+    ; CHECK: $v12m4 = PseudoVMV_V_I_M4 0, $noreg, 5
+    $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype
+    $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5, implicit $vl, implicit $vtype
+    $v12m4 = COPY $v28m4
+...
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
@@ -143,7 +143,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
 ; CHECK-NEXT: vnclip.wv v25, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8(
@@ -188,7 +188,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
 ; CHECK-NEXT: vnclip.wv v26, v8, v12
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8(
@@ -233,7 +233,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
 ; CHECK-NEXT: vnclip.wv v28, v8, v16
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8(
@@ -366,7 +366,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT: vnclip.wv v25, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16(
@@ -411,7 +411,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT: vnclip.wv v26, v8, v12
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16(
@@ -456,7 +456,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT: vnclip.wv v28, v8, v16
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16(
@@ -545,7 +545,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
; CHECK-NEXT: vnclip.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32( @@ -590,7 +590,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vnclip.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32( @@ -635,7 +635,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vnclip.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32( @@ -812,7 +812,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16( @@ -857,7 +857,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vnclip.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16( @@ -902,7 +902,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vnclip.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16( @@ -1035,7 +1035,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32( @@ -1080,7 +1080,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vnclip.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32( @@ -1125,7 +1125,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vnclip.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32( @@ -1214,7 +1214,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64( @@ -1259,7 +1259,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vnclip.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64( @@ -1304,7 +1304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vnclip.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64( @@ -1440,7 +1440,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16( @@ -1473,7 +1473,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vnclip.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: 
%a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16( @@ -1506,7 +1506,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vnclip.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16( @@ -1603,7 +1603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32( @@ -1636,7 +1636,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vnclip.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32( @@ -1669,7 +1669,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vnclip.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32( @@ -1734,7 +1734,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64( @@ -1767,7 +1767,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vnclip.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64( @@ -1800,7 +1800,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vnclip.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll @@ -143,7 +143,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vnclip.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8( @@ -188,7 +188,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vnclip.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8( @@ -233,7 +233,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vnclip.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8( @@ -366,7 +366,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vnclip.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16( @@ -411,7 +411,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vnclip.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16( @@ -456,7 +456,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vnclip.wv v28, v8, 
v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16( @@ -545,7 +545,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vnclip.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32( @@ -590,7 +590,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vnclip.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32( @@ -635,7 +635,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vnclip.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32( @@ -812,7 +812,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i8.nxv8i16( @@ -857,7 +857,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vnclip.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16( @@ -902,7 +902,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vnclip.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16( @@ -1035,7 +1035,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32( @@ -1080,7 +1080,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vnclip.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32( @@ -1125,7 +1125,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vnclip.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32( @@ -1214,7 +1214,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vnclip.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64( @@ -1259,7 +1259,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vnclip.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64( @@ -1304,7 +1304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vnclip.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64( @@ -1440,7 +1440,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vnclip.nxv8i8.nxv8i16( @@ -1473,7 +1473,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vnclip.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i8.nxv16i16( @@ -1506,7 +1506,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vnclip.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv32i8.nxv32i16( @@ -1603,7 +1603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i16.nxv4i32( @@ -1636,7 +1636,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vnclip.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i16.nxv8i32( @@ -1669,7 +1669,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vnclip.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv16i16.nxv16i32( @@ -1734,7 +1734,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vnclip.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv2i32.nxv2i64( @@ -1767,7 +1767,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vnclip.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv4i32.nxv4i64( @@ -1800,7 +1800,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vnclip.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclip.nxv8i32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll @@ -143,7 +143,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vnclipu.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8( @@ -188,7 +188,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vnclipu.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8( @@ -233,7 +233,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vnclipu.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8( @@ -366,7 +366,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vnclipu.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16( @@ -411,7 +411,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vnclipu.wv v26, v8, v12 -; 
CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16( @@ -456,7 +456,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vnclipu.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16( @@ -545,7 +545,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vnclipu.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32( @@ -590,7 +590,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vnclipu.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32( @@ -635,7 +635,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vnclipu.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32( @@ -812,7 +812,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16( @@ -857,7 +857,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vnclipu.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16( @@ -902,7 +902,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vnclipu.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16( @@ -1035,7 +1035,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32( @@ -1080,7 +1080,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vnclipu.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32( @@ -1125,7 +1125,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vnclipu.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32( @@ -1214,7 +1214,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64( @@ -1259,7 +1259,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vnclipu.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64( @@ -1304,7 +1304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vnclipu.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret 
entry: %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64( @@ -1440,7 +1440,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16( @@ -1473,7 +1473,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vnclipu.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16( @@ -1506,7 +1506,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vnclipu.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16( @@ -1603,7 +1603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32( @@ -1636,7 +1636,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vnclipu.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32( @@ -1669,7 +1669,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vnclipu.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32( @@ -1734,7 +1734,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64( @@ -1767,7 +1767,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vnclipu.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64( @@ -1800,7 +1800,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vnclipu.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll @@ -143,7 +143,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vnclipu.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8( @@ -188,7 +188,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vnclipu.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8( @@ -233,7 +233,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vnclipu.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8( @@ -366,7 +366,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: 
vnclipu.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16( @@ -411,7 +411,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vnclipu.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16( @@ -456,7 +456,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vnclipu.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16( @@ -545,7 +545,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vnclipu.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32( @@ -590,7 +590,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vnclipu.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32( @@ -635,7 +635,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vnclipu.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32( @@ -812,7 +812,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16( @@ -857,7 +857,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vnclipu.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16( @@ -902,7 +902,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vnclipu.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16( @@ -1035,7 +1035,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32( @@ -1080,7 +1080,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vnclipu.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32( @@ -1125,7 +1125,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vnclipu.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32( @@ -1214,7 +1214,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vnclipu.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64( @@ -1259,7 +1259,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vnclipu.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: 
vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64( @@ -1304,7 +1304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vnclipu.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64( @@ -1440,7 +1440,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i8.nxv8i16( @@ -1473,7 +1473,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vnclipu.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i8.nxv16i16( @@ -1506,7 +1506,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vnclipu.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv32i8.nxv32i16( @@ -1603,7 +1603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i16.nxv4i32( @@ -1636,7 +1636,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vnclipu.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i16.nxv8i32( @@ -1669,7 +1669,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vnclipu.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv16i16.nxv16i32( @@ -1734,7 +1734,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vnclipu.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv2i32.nxv2i64( @@ -1767,7 +1767,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vnclipu.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv4i32.nxv4i64( @@ -1800,7 +1800,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vnclipu.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnclipu.nxv8i32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv32.ll @@ -143,7 +143,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vnsra.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8( @@ -188,7 +188,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vnsra.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8( @@ -233,7 +233,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; 
CHECK-NEXT: vnsra.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8( @@ -366,7 +366,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vnsra.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16( @@ -411,7 +411,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vnsra.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16( @@ -456,7 +456,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vnsra.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16( @@ -545,7 +545,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vnsra.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32( @@ -590,7 +590,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vnsra.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32( @@ -635,7 +635,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vnsra.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32( @@ -812,7 +812,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16( @@ -857,7 +857,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vnsra.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16( @@ -902,7 +902,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vnsra.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16( @@ -1035,7 +1035,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32( @@ -1080,7 +1080,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vnsra.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32( @@ -1125,7 +1125,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vnsra.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32( @@ -1214,7 +1214,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret 
entry: %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64( @@ -1259,7 +1259,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vnsra.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64( @@ -1304,7 +1304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vnsra.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64( @@ -1440,7 +1440,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16( @@ -1473,7 +1473,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vnsra.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16( @@ -1506,7 +1506,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vnsra.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16( @@ -1603,7 +1603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32( @@ -1636,7 +1636,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vnsra.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32( @@ -1669,7 +1669,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vnsra.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32( @@ -1734,7 +1734,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64( @@ -1767,7 +1767,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vnsra.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64( @@ -1800,7 +1800,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vnsra.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-rv64.ll @@ -143,7 +143,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vnsra.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16.nxv8i8( @@ -188,7 +188,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vnsra.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v 
v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16.nxv16i8( @@ -233,7 +233,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vnsra.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16.nxv32i8( @@ -366,7 +366,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vnsra.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32.nxv4i16( @@ -411,7 +411,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vnsra.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32.nxv8i16( @@ -456,7 +456,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vnsra.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32.nxv16i16( @@ -545,7 +545,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vnsra.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64.nxv2i32( @@ -590,7 +590,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vnsra.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64.nxv4i32( @@ -635,7 +635,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vnsra.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64.nxv8i32( @@ -812,7 +812,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16( @@ -857,7 +857,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vnsra.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16( @@ -902,7 +902,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vnsra.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16( @@ -1035,7 +1035,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32( @@ -1080,7 +1080,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vnsra.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32( @@ -1125,7 +1125,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vnsra.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32( @@ -1214,7 +1214,7 @@ ; 
CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vnsra.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64( @@ -1259,7 +1259,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vnsra.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64( @@ -1304,7 +1304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vnsra.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64( @@ -1440,7 +1440,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i8.nxv8i16( @@ -1473,7 +1473,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vnsra.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i8.nxv16i16( @@ -1506,7 +1506,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vnsra.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv32i8.nxv32i16( @@ -1603,7 +1603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i16.nxv4i32( @@ -1636,7 +1636,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vnsra.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i16.nxv8i32( @@ -1669,7 +1669,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vnsra.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv16i16.nxv16i32( @@ -1734,7 +1734,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vnsra.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv2i32.nxv2i64( @@ -1767,7 +1767,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vnsra.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv4i32.nxv4i64( @@ -1800,7 +1800,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vnsra.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsra.nxv8i32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv32.ll @@ -143,7 +143,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vnsrl.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8( @@ -188,7 +188,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vnsrl.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8( @@ -233,7 +233,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vnsrl.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8( @@ -366,7 +366,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vnsrl.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16( @@ -411,7 +411,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vnsrl.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16( @@ -456,7 +456,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vnsrl.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16( @@ -545,7 +545,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vnsrl.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32( @@ -590,7 +590,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vnsrl.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32( @@ -635,7 +635,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vnsrl.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32( @@ -812,7 +812,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16( @@ -857,7 +857,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vnsrl.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16( @@ -902,7 +902,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vnsrl.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16( @@ -1035,7 +1035,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32( @@ -1080,7 +1080,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vnsrl.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32( @@ -1125,7 +1125,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vnsrl.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32( @@ -1214,7 +1214,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64( @@ -1259,7 +1259,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vnsrl.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64( @@ -1304,7 +1304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vnsrl.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64( @@ -1440,7 +1440,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16( @@ -1473,7 +1473,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vnsrl.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16( @@ -1506,7 +1506,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vnsrl.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16( @@ -1603,7 +1603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32( @@ -1636,7 +1636,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vnsrl.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32( @@ -1669,7 +1669,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vnsrl.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32( @@ -1734,7 +1734,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64( @@ -1767,7 +1767,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vnsrl.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64( @@ -1800,7 +1800,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vnsrl.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-rv64.ll @@ -143,7 +143,7 @@ ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vnsrl.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16.nxv8i8( @@ -188,7 +188,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vnsrl.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16.nxv16i8( @@ -233,7 +233,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vnsrl.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16.nxv32i8( @@ -366,7 +366,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vnsrl.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32.nxv4i16( @@ -411,7 +411,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vnsrl.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32.nxv8i16( @@ -456,7 +456,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vnsrl.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32.nxv16i16( @@ -545,7 +545,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vnsrl.wv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64.nxv2i32( @@ -590,7 +590,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vnsrl.wv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64.nxv4i32( @@ -635,7 +635,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vnsrl.wv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64.nxv8i32( @@ -812,7 +812,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16( @@ -857,7 +857,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vnsrl.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16( @@ -902,7 +902,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vnsrl.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16( @@ -1035,7 +1035,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32( @@ -1080,7 +1080,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vnsrl.wx v26, v8, a0 -; 
CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32( @@ -1125,7 +1125,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vnsrl.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32( @@ -1214,7 +1214,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vnsrl.wx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64( @@ -1259,7 +1259,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vnsrl.wx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64( @@ -1304,7 +1304,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vnsrl.wx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64( @@ -1440,7 +1440,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i8.nxv8i16( @@ -1473,7 +1473,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vnsrl.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i8.nxv16i16( @@ -1506,7 +1506,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vnsrl.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv32i8.nxv32i16( @@ -1603,7 +1603,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i16.nxv4i32( @@ -1636,7 +1636,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vnsrl.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i16.nxv8i32( @@ -1669,7 +1669,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vnsrl.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv16i16.nxv16i32( @@ -1734,7 +1734,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vnsrl.wi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv2i32.nxv2i64( @@ -1767,7 +1767,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vnsrl.wi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv4i32.nxv4i64( @@ -1800,7 +1800,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vnsrl.wi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vnsrl.nxv8i32.nxv8i64( diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll @@ -146,7 +146,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vrgather.vv v25, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i8.i32( @@ -191,7 +191,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vrgather.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16i8.i32( @@ -236,7 +236,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vrgather.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv32i8.i32( @@ -281,7 +281,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m8,ta,mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv64i8.i32( @@ -417,7 +417,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vrgather.vv v25, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4i16.i32( @@ -462,7 +462,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vrgather.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i16.i32( @@ -507,7 +507,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vrgather.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16i16.i32( @@ -552,7 +552,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv32i16.i32( @@ -643,7 +643,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vrgather.vv v25, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2i32.i32( @@ -688,7 +688,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vrgather.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4i32.i32( @@ -733,7 +733,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vrgather.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i32.i32( @@ -778,7 +778,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16i32.i32( @@ -914,7 +914,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vrgather.vv v25, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v 
v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4f16.i32( @@ -959,7 +959,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vrgather.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8f16.i32( @@ -1004,7 +1004,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vrgather.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16f16.i32( @@ -1049,7 +1049,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv32f16.i32( @@ -1140,7 +1140,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vrgather.vv v25, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2f32.i32( @@ -1185,7 +1185,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vrgather.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4f32.i32( @@ -1230,7 +1230,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vrgather.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8f32.i32( @@ -1275,7 +1275,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16f32.i32( @@ -1321,7 +1321,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vrgather.vv v25, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv1f64.i32( @@ -1366,7 +1366,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu ; CHECK-NEXT: vrgather.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2f64.i32( @@ -1411,7 +1411,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vrgather.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4f64.i32( @@ -1456,7 +1456,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8f64.i32( @@ -1637,7 +1637,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i8.i32( @@ -1682,7 +1682,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i8.i32( @@ 
-1727,7 +1727,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i8.i32( @@ -1772,7 +1772,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv64i8.i32( @@ -1907,7 +1907,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i16.i32( @@ -1952,7 +1952,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i16.i32( @@ -1997,7 +1997,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i16.i32( @@ -2042,7 +2042,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i16.i32( @@ -2132,7 +2132,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i32.i32( @@ -2177,7 +2177,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i32.i32( @@ -2222,7 +2222,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i32.i32( @@ -2267,7 +2267,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i32.i32( @@ -2402,7 +2402,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f16.i32( @@ -2447,7 +2447,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f16.i32( @@ -2492,7 +2492,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f16.i32( @@ -2537,7 +2537,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu ; 
CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32f16.i32( @@ -2627,7 +2627,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f32.i32( @@ -2672,7 +2672,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f32.i32( @@ -2717,7 +2717,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f32.i32( @@ -2762,7 +2762,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f32.i32( @@ -2807,7 +2807,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f64.i32( @@ -2852,7 +2852,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f64.i32( @@ -2897,7 +2897,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f64.i32( @@ -2942,7 +2942,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f64.i32( @@ -3081,7 +3081,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i8.i32( @@ -3114,7 +3114,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vrgather.vi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i8.i32( @@ -3147,7 +3147,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vrgather.vi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i8.i32( @@ -3180,7 +3180,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m8,ta,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv64i8.i32( @@ -3279,7 +3279,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; 
CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i16.i32( @@ -3312,7 +3312,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vrgather.vi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i16.i32( @@ -3345,7 +3345,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vrgather.vi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i16.i32( @@ -3378,7 +3378,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i16.i32( @@ -3444,7 +3444,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i32.i32( @@ -3477,7 +3477,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vrgather.vi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i32.i32( @@ -3510,7 +3510,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vrgather.vi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i32.i32( @@ -3543,7 +3543,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i32.i32( @@ -3642,7 +3642,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f16.i32( @@ -3675,7 +3675,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vrgather.vi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f16.i32( @@ -3708,7 +3708,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vrgather.vi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f16.i32( @@ -3741,7 +3741,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32f16.i32( @@ -3807,7 +3807,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f32.i32( @@ -3840,7 +3840,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vrgather.vi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f32.i32( @@ -3873,7 +3873,7 @@ ; CHECK: # 
%bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vrgather.vi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f32.i32( @@ -3906,7 +3906,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f32.i32( @@ -3939,7 +3939,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f64.i32( @@ -3972,7 +3972,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu ; CHECK-NEXT: vrgather.vi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f64.i32( @@ -4005,7 +4005,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vrgather.vi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f64.i32( @@ -4038,7 +4038,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f64.i32( diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll @@ -146,7 +146,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vrgather.vv v25, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i8.i64( @@ -191,7 +191,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vrgather.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16i8.i64( @@ -236,7 +236,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vrgather.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv32i8.i64( @@ -281,7 +281,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m8,ta,mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv64i8.i64( @@ -417,7 +417,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vrgather.vv v25, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4i16.i64( @@ -462,7 +462,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vrgather.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i16.i64( @@ -507,7 +507,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vrgather.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: 
ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16i16.i64( @@ -552,7 +552,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv32i16.i64( @@ -643,7 +643,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vrgather.vv v25, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2i32.i64( @@ -688,7 +688,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vrgather.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4i32.i64( @@ -733,7 +733,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vrgather.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i32.i64( @@ -778,7 +778,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16i32.i64( @@ -824,7 +824,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vrgather.vv v25, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv1i64.i64( @@ -869,7 +869,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu ; CHECK-NEXT: vrgather.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2i64.i64( @@ -914,7 +914,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vrgather.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4i64.i64( @@ -959,7 +959,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8i64.i64( @@ -1095,7 +1095,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vrgather.vv v25, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4f16.i64( @@ -1140,7 +1140,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vrgather.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8f16.i64( @@ -1185,7 +1185,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vrgather.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16f16.i64( @@ -1230,7 +1230,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv32f16.i64( @@ -1321,7 +1321,7 @@ ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vrgather.vv v25, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2f32.i64( @@ -1366,7 +1366,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vrgather.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4f32.i64( @@ -1411,7 +1411,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vrgather.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8f32.i64( @@ -1456,7 +1456,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv16f32.i64( @@ -1502,7 +1502,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vrgather.vv v25, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv1f64.i64( @@ -1547,7 +1547,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu ; CHECK-NEXT: vrgather.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv2f64.i64( @@ -1592,7 +1592,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vrgather.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv4f64.i64( @@ -1637,7 +1637,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu ; CHECK-NEXT: vrgather.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vv.nxv8f64.i64( @@ -1818,7 +1818,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i8.i64( @@ -1863,7 +1863,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i8.i64( @@ -1908,7 +1908,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i8.i64( @@ -1953,7 +1953,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv64i8.i64( @@ -2088,7 +2088,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i16.i64( @@ -2133,7 +2133,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 -; 
CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i16.i64( @@ -2178,7 +2178,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i16.i64( @@ -2223,7 +2223,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i16.i64( @@ -2313,7 +2313,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i32.i64( @@ -2358,7 +2358,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i32.i64( @@ -2403,7 +2403,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i32.i64( @@ -2448,7 +2448,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i32.i64( @@ -2493,7 +2493,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1i64.i64( @@ -2538,7 +2538,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i64.i64( @@ -2583,7 +2583,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i64.i64( @@ -2628,7 +2628,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i64.i64( @@ -2763,7 +2763,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f16.i64( @@ -2808,7 +2808,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f16.i64( @@ -2853,7 +2853,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vrgather.vx.nxv16f16.i64( @@ -2898,7 +2898,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32f16.i64( @@ -2988,7 +2988,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f32.i64( @@ -3033,7 +3033,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f32.i64( @@ -3078,7 +3078,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f32.i64( @@ -3123,7 +3123,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f32.i64( @@ -3168,7 +3168,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu ; CHECK-NEXT: vrgather.vx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f64.i64( @@ -3213,7 +3213,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu ; CHECK-NEXT: vrgather.vx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f64.i64( @@ -3258,7 +3258,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu ; CHECK-NEXT: vrgather.vx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f64.i64( @@ -3303,7 +3303,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu ; CHECK-NEXT: vrgather.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f64.i64( @@ -3442,7 +3442,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i8.i64( @@ -3475,7 +3475,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vrgather.vi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i8.i64( @@ -3508,7 +3508,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vrgather.vi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i8.i64( @@ -3541,7 +3541,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m8,ta,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv64i8.i64( @@ -3640,7 +3640,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: 
vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i16.i64( @@ -3673,7 +3673,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vrgather.vi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i16.i64( @@ -3706,7 +3706,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vrgather.vi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i16.i64( @@ -3739,7 +3739,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32i16.i64( @@ -3805,7 +3805,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i32.i64( @@ -3838,7 +3838,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vrgather.vi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i32.i64( @@ -3871,7 +3871,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vrgather.vi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i32.i64( @@ -3904,7 +3904,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16i32.i64( @@ -3937,7 +3937,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1i64.i64( @@ -3970,7 +3970,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu ; CHECK-NEXT: vrgather.vi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2i64.i64( @@ -4003,7 +4003,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vrgather.vi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4i64.i64( @@ -4036,7 +4036,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8i64.i64( @@ -4135,7 +4135,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f16.i64( @@ -4168,7 +4168,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vrgather.vi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; 
CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f16.i64( @@ -4201,7 +4201,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vrgather.vi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f16.i64( @@ -4234,7 +4234,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv32f16.i64( @@ -4300,7 +4300,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f32.i64( @@ -4333,7 +4333,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vrgather.vi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f32.i64( @@ -4366,7 +4366,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vrgather.vi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f32.i64( @@ -4399,7 +4399,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv16f32.i64( @@ -4432,7 +4432,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vrgather.vi v25, v8, 9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv1f64.i64( @@ -4465,7 +4465,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu ; CHECK-NEXT: vrgather.vi v26, v8, 9 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv2f64.i64( @@ -4498,7 +4498,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vrgather.vi v28, v8, 9 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv4f64.i64( @@ -4531,7 +4531,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu ; CHECK-NEXT: vrgather.vi v16, v8, 9 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgather.vx.nxv8f64.i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll @@ -146,7 +146,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vrgatherei16.vv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8i8( @@ -191,7 +191,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vrgatherei16.vv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16i8( @@ -236,7 +236,7 @@ ; CHECK: # %bb.0: # 
%entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vrgatherei16.vv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv32i8( @@ -371,7 +371,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vrgatherei16.vv v25, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4i16( @@ -416,7 +416,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vrgatherei16.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8i16( @@ -461,7 +461,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vrgatherei16.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16i16( @@ -506,7 +506,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv32i16( @@ -597,7 +597,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vrgatherei16.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4i32( @@ -642,7 +642,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vrgatherei16.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8i32( @@ -687,7 +687,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16i32( @@ -733,7 +733,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vrgatherei16.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4i64( @@ -778,7 +778,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8i64( @@ -914,7 +914,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vrgatherei16.vv v25, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4f16( @@ -959,7 +959,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vrgatherei16.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8f16( @@ -1004,7 +1004,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vrgatherei16.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16f16( @@ -1049,7 +1049,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu 
; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv32f16( @@ -1140,7 +1140,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vrgatherei16.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4f32( @@ -1185,7 +1185,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vrgatherei16.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8f32( @@ -1230,7 +1230,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16f32( @@ -1276,7 +1276,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vrgatherei16.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4f64( @@ -1321,7 +1321,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll @@ -146,7 +146,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu ; CHECK-NEXT: vrgatherei16.vv v25, v8, v10 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8i8( @@ -191,7 +191,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu ; CHECK-NEXT: vrgatherei16.vv v26, v8, v12 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16i8( @@ -236,7 +236,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu ; CHECK-NEXT: vrgatherei16.vv v28, v8, v16 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv32i8( @@ -371,7 +371,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vrgatherei16.vv v25, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4i16( @@ -416,7 +416,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vrgatherei16.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8i16( @@ -461,7 +461,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vrgatherei16.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16i16( @@ -506,7 +506,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: 
vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv32i16( @@ -597,7 +597,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vrgatherei16.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4i32( @@ -642,7 +642,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vrgatherei16.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8i32( @@ -687,7 +687,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16i32( @@ -733,7 +733,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vrgatherei16.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4i64( @@ -778,7 +778,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8i64( @@ -914,7 +914,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vrgatherei16.vv v25, v8, v9 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4f16( @@ -959,7 +959,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vrgatherei16.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8f16( @@ -1004,7 +1004,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vrgatherei16.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16f16( @@ -1049,7 +1049,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv32f16( @@ -1140,7 +1140,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vrgatherei16.vv v26, v8, v10 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv4f32( @@ -1185,7 +1185,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vrgatherei16.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8f32( @@ -1230,7 +1230,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv16f32( @@ -1276,7 +1276,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vrgatherei16.vv v28, v8, v12 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: 
%a = call @llvm.riscv.vrgatherei16.vv.nxv4f64( @@ -1321,7 +1321,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu ; CHECK-NEXT: vrgatherei16.vv v24, v8, v16 -; CHECK-NEXT: vmv8r.v v8, v24 +; CHECK-NEXT: vmv.v.v v8, v24 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vrgatherei16.vv.nxv8f64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll @@ -10,7 +10,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vsext.vf8 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i64.nxv1i8( @@ -51,7 +51,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu ; CHECK-NEXT: vsext.vf8 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i64.nxv2i8( @@ -92,7 +92,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vsext.vf8 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i64.nxv4i8( @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu ; CHECK-NEXT: vsext.vf8 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i64.nxv8i8( @@ -174,7 +174,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vsext.vf4 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i64.nxv1i16( @@ -215,7 +215,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu ; CHECK-NEXT: vsext.vf4 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i64.nxv2i16( @@ -256,7 +256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vsext.vf4 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i64.nxv4i16( @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu ; CHECK-NEXT: vsext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i64.nxv8i16( @@ -379,7 +379,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vsext.vf4 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i32.nxv2i8( @@ -420,7 +420,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vsext.vf4 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i32.nxv4i8( @@ -461,7 +461,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vsext.vf4 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i32.nxv8i8( @@ -502,7 +502,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu ; CHECK-NEXT: vsext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vsext.nxv16i32.nxv16i8( @@ -584,7 +584,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vsext.vf2 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i32.nxv2i16( @@ -625,7 +625,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vsext.vf2 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i32.nxv4i16( @@ -666,7 +666,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vsext.vf2 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i32.nxv8i16( @@ -707,7 +707,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu ; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv16i32.nxv16i16( @@ -830,7 +830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vsext.vf2 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i16.nxv4i8( @@ -871,7 +871,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vsext.vf2 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i16.nxv8i8( @@ -912,7 +912,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vsext.vf2 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv16i16.nxv16i8( @@ -953,7 +953,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu ; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv32i16.nxv32i8( diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll @@ -10,7 +10,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vsext.vf8 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i64.nxv1i8( @@ -51,7 +51,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu ; CHECK-NEXT: vsext.vf8 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i64.nxv2i8( @@ -92,7 +92,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vsext.vf8 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i64.nxv4i8( @@ -133,7 +133,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu ; CHECK-NEXT: vsext.vf8 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i64.nxv8i8( @@ -174,7 +174,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vsext.vf4 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i64.nxv1i16( @@ 
-215,7 +215,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu ; CHECK-NEXT: vsext.vf4 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i64.nxv2i16( @@ -256,7 +256,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vsext.vf4 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i64.nxv4i16( @@ -297,7 +297,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu ; CHECK-NEXT: vsext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i64.nxv8i16( @@ -379,7 +379,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vsext.vf4 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i32.nxv2i8( @@ -420,7 +420,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vsext.vf4 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i32.nxv4i8( @@ -461,7 +461,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vsext.vf4 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i32.nxv8i8( @@ -502,7 +502,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu ; CHECK-NEXT: vsext.vf4 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv16i32.nxv16i8( @@ -543,7 +543,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu ; CHECK-NEXT: vsext.vf2 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv1i64.nxv1i32( @@ -584,7 +584,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu ; CHECK-NEXT: vsext.vf2 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i64.nxv2i32( @@ -625,7 +625,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu ; CHECK-NEXT: vsext.vf2 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i64.nxv4i32( @@ -666,7 +666,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu ; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i64.nxv8i32( @@ -748,7 +748,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu ; CHECK-NEXT: vsext.vf2 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv2i32.nxv2i16( @@ -789,7 +789,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu ; CHECK-NEXT: vsext.vf2 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i32.nxv4i16( @@ -830,7 +830,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu ; CHECK-NEXT: vsext.vf2 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call 
@llvm.riscv.vsext.nxv8i32.nxv8i16( @@ -871,7 +871,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu ; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv16i32.nxv16i16( @@ -994,7 +994,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu ; CHECK-NEXT: vsext.vf2 v25, v8 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv4i16.nxv4i8( @@ -1035,7 +1035,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu ; CHECK-NEXT: vsext.vf2 v26, v8 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv8i16.nxv8i8( @@ -1076,7 +1076,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu ; CHECK-NEXT: vsext.vf2 v28, v8 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv16i16.nxv16i8( @@ -1117,7 +1117,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu ; CHECK-NEXT: vsext.vf2 v16, v8 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vsext.nxv32i16.nxv32i8( diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll @@ -146,7 +146,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu ; CHECK-NEXT: vslide1up.vx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv8i8.i8( @@ -191,7 +191,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu ; CHECK-NEXT: vslide1up.vx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv16i8.i8( @@ -236,7 +236,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu ; CHECK-NEXT: vslide1up.vx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv32i8.i8( @@ -281,7 +281,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e8,m8,ta,mu ; CHECK-NEXT: vslide1up.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v v8, v16 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv64i8.i8( @@ -416,7 +416,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu ; CHECK-NEXT: vslide1up.vx v25, v8, a0 -; CHECK-NEXT: vmv1r.v v8, v25 +; CHECK-NEXT: vmv.v.v v8, v25 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv4i16.i16( @@ -461,7 +461,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu ; CHECK-NEXT: vslide1up.vx v26, v8, a0 -; CHECK-NEXT: vmv2r.v v8, v26 +; CHECK-NEXT: vmv.v.v v8, v26 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv8i16.i16( @@ -506,7 +506,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu ; CHECK-NEXT: vslide1up.vx v28, v8, a0 -; CHECK-NEXT: vmv4r.v v8, v28 +; CHECK-NEXT: vmv.v.v v8, v28 ; CHECK-NEXT: ret entry: %a = call @llvm.riscv.vslide1up.nxv16i16.i16( @@ -551,7 +551,7 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu ; CHECK-NEXT: vslide1up.vx v16, v8, a0 -; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: vmv.v.v 
v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv32i16.i16(
@@ -641,7 +641,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT: vslide1up.vx v25, v8, a0
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv2i32.i32(
@@ -686,7 +686,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT: vslide1up.vx v26, v8, a0
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv4i32.i32(
@@ -731,7 +731,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT: vslide1up.vx v28, v8, a0
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv8i32.i32(
@@ -776,7 +776,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu
 ; CHECK-NEXT: vslide1up.vx v16, v8, a0
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv16i32.i32(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
@@ -146,7 +146,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu
 ; CHECK-NEXT: vslide1up.vx v25, v8, a0
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv8i8.i8(
@@ -191,7 +191,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu
 ; CHECK-NEXT: vslide1up.vx v26, v8, a0
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv16i8.i8(
@@ -236,7 +236,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu
 ; CHECK-NEXT: vslide1up.vx v28, v8, a0
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv32i8.i8(
@@ -281,7 +281,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e8,m8,ta,mu
 ; CHECK-NEXT: vslide1up.vx v16, v8, a0
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv64i8.i8(
@@ -416,7 +416,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu
 ; CHECK-NEXT: vslide1up.vx v25, v8, a0
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv4i16.i16(
@@ -461,7 +461,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu
 ; CHECK-NEXT: vslide1up.vx v26, v8, a0
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv8i16.i16(
@@ -506,7 +506,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu
 ; CHECK-NEXT: vslide1up.vx v28, v8, a0
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv16i16.i16(
@@ -551,7 +551,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu
 ; CHECK-NEXT: vslide1up.vx v16, v8, a0
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv32i16.i16(
@@ -641,7 +641,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu
 ; CHECK-NEXT:
vslide1up.vx v25, v8, a0
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv2i32.i32(
@@ -686,7 +686,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
 ; CHECK-NEXT: vslide1up.vx v26, v8, a0
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv4i32.i32(
@@ -731,7 +731,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu
 ; CHECK-NEXT: vslide1up.vx v28, v8, a0
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv8i32.i32(
@@ -776,7 +776,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu
 ; CHECK-NEXT: vslide1up.vx v16, v8, a0
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv16i32.i32(
@@ -821,7 +821,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu
 ; CHECK-NEXT: vslide1up.vx v25, v8, a0
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv1i64.i64(
@@ -866,7 +866,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu
 ; CHECK-NEXT: vslide1up.vx v26, v8, a0
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv2i64.i64(
@@ -911,7 +911,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu
 ; CHECK-NEXT: vslide1up.vx v28, v8, a0
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv4i64.i64(
@@ -956,7 +956,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu
 ; CHECK-NEXT: vslide1up.vx v16, v8, a0
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vslide1up.nxv8i64.i64(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode-rv32.ll
@@ -36,7 +36,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
 ; CHECK-NEXT: vnsrl.wi v25, v8, 0
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 %tvec = trunc %va to
 ret %tvec
@@ -47,7 +47,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu
 ; CHECK-NEXT: vnsrl.wi v26, v8, 0
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 %tvec = trunc %va to
 ret %tvec
@@ -114,7 +114,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vnsrl.wi v25, v8, 0
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 %tvec = trunc %va to
 ret %tvec
@@ -137,7 +137,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vnsrl.wi v26, v8, 0
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 %tvec = trunc %va to
 ret %tvec
@@ -160,7 +160,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vnsrl.wi v28, v8, 0
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 %tvec = trunc %va to
 ret %tvec
@@ -233,7 +233,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu
 ; CHECK-NEXT: vnsrl.wi v25, v8, 0
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ;
CHECK-NEXT: ret
 %tvec = trunc %va to
 ret %tvec
@@ -270,7 +270,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu
 ; CHECK-NEXT: vnsrl.wi v26, v8, 0
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 %tvec = trunc %va to
 ret %tvec
@@ -307,7 +307,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT: vnsrl.wi v28, v8, 0
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 %tvec = trunc %va to
 ret %tvec
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode-rv64.ll
@@ -36,7 +36,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
 ; CHECK-NEXT: vnsrl.wi v25, v8, 0
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 %tvec = trunc %va to
 ret %tvec
@@ -47,7 +47,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e8,m2,ta,mu
 ; CHECK-NEXT: vnsrl.wi v26, v8, 0
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 %tvec = trunc %va to
 ret %tvec
@@ -114,7 +114,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
 ; CHECK-NEXT: vnsrl.wi v25, v8, 0
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 %tvec = trunc %va to
 ret %tvec
@@ -137,7 +137,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
 ; CHECK-NEXT: vnsrl.wi v26, v8, 0
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 %tvec = trunc %va to
 ret %tvec
@@ -160,7 +160,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
 ; CHECK-NEXT: vnsrl.wi v28, v8, 0
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 %tvec = trunc %va to
 ret %tvec
@@ -233,7 +233,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu
 ; CHECK-NEXT: vnsrl.wi v25, v8, 0
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 %tvec = trunc %va to
 ret %tvec
@@ -270,7 +270,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu
 ; CHECK-NEXT: vnsrl.wi v26, v8, 0
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 %tvec = trunc %va to
 ret %tvec
@@ -307,7 +307,7 @@
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
 ; CHECK-NEXT: vnsrl.wi v28, v8, 0
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 %tvec = trunc %va to
 ret %tvec
diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll
@@ -10,7 +10,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT: vzext.vf8 v25, v8
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv1i64.nxv1i8(
@@ -51,7 +51,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT: vzext.vf8 v26, v8
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv2i64.nxv2i8(
@@ -92,7 +92,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT: vzext.vf8 v28, v8
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i64.nxv4i8(
@@ -133,7 +133,7 @@
 ; CHECK: # %bb.0: # %entry
 ;
CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
 ; CHECK-NEXT: vzext.vf8 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i64.nxv8i8(
@@ -174,7 +174,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT: vzext.vf4 v25, v8
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv1i64.nxv1i16(
@@ -215,7 +215,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT: vzext.vf4 v26, v8
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv2i64.nxv2i16(
@@ -256,7 +256,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT: vzext.vf4 v28, v8
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i64.nxv4i16(
@@ -297,7 +297,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
 ; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i64.nxv8i16(
@@ -379,7 +379,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT: vzext.vf4 v25, v8
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv2i32.nxv2i8(
@@ -420,7 +420,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT: vzext.vf4 v26, v8
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i32.nxv4i8(
@@ -461,7 +461,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT: vzext.vf4 v28, v8
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i32.nxv8i8(
@@ -502,7 +502,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
 ; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv16i32.nxv16i8(
@@ -543,7 +543,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT: vzext.vf2 v25, v8
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv1i64.nxv1i32(
@@ -584,7 +584,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT: vzext.vf2 v26, v8
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv2i64.nxv2i32(
@@ -625,7 +625,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT: vzext.vf2 v28, v8
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i64.nxv4i32(
@@ -666,7 +666,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
 ; CHECK-NEXT: vzext.vf2 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i64.nxv8i32(
@@ -748,7 +748,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT: vzext.vf2 v25, v8
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv2i32.nxv2i16(
@@ -789,7 +789,7 @@
 ;
CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT: vzext.vf2 v26, v8
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i32.nxv4i16(
@@ -830,7 +830,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT: vzext.vf2 v28, v8
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i32.nxv8i16(
@@ -871,7 +871,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
 ; CHECK-NEXT: vzext.vf2 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv16i32.nxv16i16(
@@ -994,7 +994,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT: vzext.vf2 v25, v8
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i16.nxv4i8(
@@ -1035,7 +1035,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT: vzext.vf2 v26, v8
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i16.nxv8i8(
@@ -1076,7 +1076,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT: vzext.vf2 v28, v8
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv16i16.nxv16i8(
@@ -1117,7 +1117,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
 ; CHECK-NEXT: vzext.vf2 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv32i16.nxv32i8(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll
@@ -10,7 +10,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT: vzext.vf8 v25, v8
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv1i64.nxv1i8(
@@ -51,7 +51,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT: vzext.vf8 v26, v8
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv2i64.nxv2i8(
@@ -92,7 +92,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT: vzext.vf8 v28, v8
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i64.nxv4i8(
@@ -133,7 +133,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
 ; CHECK-NEXT: vzext.vf8 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i64.nxv8i8(
@@ -174,7 +174,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT: vzext.vf4 v25, v8
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv1i64.nxv1i16(
@@ -215,7 +215,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT: vzext.vf4 v26, v8
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv2i64.nxv2i16(
@@ -256,7 +256,7 @@
 ; CHECK: # %bb.0: # %entry
 ;
CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT: vzext.vf4 v28, v8
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i64.nxv4i16(
@@ -297,7 +297,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
 ; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i64.nxv8i16(
@@ -379,7 +379,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT: vzext.vf4 v25, v8
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv2i32.nxv2i8(
@@ -420,7 +420,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT: vzext.vf4 v26, v8
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i32.nxv4i8(
@@ -461,7 +461,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT: vzext.vf4 v28, v8
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i32.nxv8i8(
@@ -502,7 +502,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
 ; CHECK-NEXT: vzext.vf4 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv16i32.nxv16i8(
@@ -543,7 +543,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
 ; CHECK-NEXT: vzext.vf2 v25, v8
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv1i64.nxv1i32(
@@ -584,7 +584,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
 ; CHECK-NEXT: vzext.vf2 v26, v8
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv2i64.nxv2i32(
@@ -625,7 +625,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
 ; CHECK-NEXT: vzext.vf2 v28, v8
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i64.nxv4i32(
@@ -666,7 +666,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
 ; CHECK-NEXT: vzext.vf2 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i64.nxv8i32(
@@ -748,7 +748,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
 ; CHECK-NEXT: vzext.vf2 v25, v8
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv2i32.nxv2i16(
@@ -789,7 +789,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
 ; CHECK-NEXT: vzext.vf2 v26, v8
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i32.nxv4i16(
@@ -830,7 +830,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
 ; CHECK-NEXT: vzext.vf2 v28, v8
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i32.nxv8i16(
@@ -871,7 +871,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
 ; CHECK-NEXT: vzext.vf2 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv16i32.nxv16i16(
@@ -994,7 +994,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
 ; CHECK-NEXT: vzext.vf2 v25, v8
-; CHECK-NEXT: vmv1r.v v8, v25
+; CHECK-NEXT: vmv.v.v v8, v25
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv4i16.nxv4i8(
@@ -1035,7 +1035,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
 ; CHECK-NEXT: vzext.vf2 v26, v8
-; CHECK-NEXT: vmv2r.v v8, v26
+; CHECK-NEXT: vmv.v.v v8, v26
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv8i16.nxv8i8(
@@ -1076,7 +1076,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
 ; CHECK-NEXT: vzext.vf2 v28, v8
-; CHECK-NEXT: vmv4r.v v8, v28
+; CHECK-NEXT: vmv.v.v v8, v28
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv16i16.nxv16i8(
@@ -1117,7 +1117,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
 ; CHECK-NEXT: vzext.vf2 v16, v8
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vmv.v.v v8, v16
 ; CHECK-NEXT: ret
 entry:
 %a = call @llvm.riscv.vzext.nxv32i16.nxv32i8(
diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll
@@ -13,7 +13,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetivli zero, 0, e16,m4,ta,mu
 ; CHECK-NEXT: vlseg2e16.v v4, (a0)
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vmv.v.v v8, v4
 ; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu
 ; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t
 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
@@ -34,7 +34,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetivli zero, 0, e16,m4,ta,mu
 ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
-; CHECK-NEXT: vmv4r.v v8, v4
+; CHECK-NEXT: vmv.v.v v8, v4
 ; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu
 ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t
 ; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
@@ -54,7 +54,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetivli zero, 0, e16,m4,ta,mu
 ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv.v.v v16, v12
 ; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu
 ; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t
 ; CHECK-NEXT: vmv4r.v v8, v16
@@ -75,7 +75,7 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: vsetivli zero, 0, e16,m4,ta,mu
 ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8
-; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv.v.v v16, v12
 ; CHECK-NEXT: vsetvli zero, zero, e16,m4,tu,mu
 ; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t
 ; CHECK-NEXT: vmv4r.v v8, v16