diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -894,6 +894,21 @@
   return Reg - RISCV::F0_D + RISCV::F0_F;
 }
 
+static MCRegister convertVRToVRMx(const MCRegisterInfo &RI, MCRegister Reg,
+                                  unsigned Kind) {
+  unsigned RegClassID;
+  if (Kind == MCK_VRM2)
+    RegClassID = RISCV::VRM2RegClassID;
+  else if (Kind == MCK_VRM4)
+    RegClassID = RISCV::VRM4RegClassID;
+  else if (Kind == MCK_VRM8)
+    RegClassID = RISCV::VRM8RegClassID;
+  else
+    return 0;
+  return RI.getMatchingSuperReg(Reg, RISCV::sub_vrm1_0,
+                                &RISCVMCRegisterClasses[RegClassID]);
+}
+
 unsigned RISCVAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
                                                     unsigned Kind) {
   RISCVOperand &Op = static_cast<RISCVOperand &>(AsmOp);
@@ -905,6 +920,7 @@
       RISCVMCRegisterClasses[RISCV::FPR64RegClassID].contains(Reg);
   bool IsRegFPR64C =
       RISCVMCRegisterClasses[RISCV::FPR64CRegClassID].contains(Reg);
+  bool IsRegVR = RISCVMCRegisterClasses[RISCV::VRRegClassID].contains(Reg);
 
   // As the parser couldn't differentiate an FPR32 from an FPR64, coerce the
   // register from FPR64 to FPR32 or FPR64C to FPR32C if necessary.
@@ -919,6 +935,14 @@
     Op.Reg.RegNum = convertFPR64ToFPR16(Reg);
     return Match_Success;
   }
+  // As the parser couldn't differentiate a VRM2/VRM4/VRM8 from a VR, coerce
+  // the register from VR to VRM2/VRM4/VRM8 if necessary.
+  if (IsRegVR && (Kind == MCK_VRM2 || Kind == MCK_VRM4 || Kind == MCK_VRM8)) {
+    Op.Reg.RegNum = convertVRToVRMx(*getContext().getRegisterInfo(), Reg, Kind);
+    if (Op.Reg.RegNum == 0)
+      return Match_InvalidOperand;
+    return Match_Success;
+  }
   return Match_InvalidOperand;
 }
diff --git a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
--- a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
+++ b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp
@@ -172,6 +172,66 @@
   return MCDisassembler::Success;
 }
 
+static DecodeStatus DecodeVRM2RegisterClass(MCInst &Inst, uint64_t RegNo,
+                                            uint64_t Address,
+                                            const void *Decoder) {
+  if (RegNo >= 32)
+    return MCDisassembler::Fail;
+
+  if (RegNo % 2)
+    return MCDisassembler::Fail;
+
+  const RISCVDisassembler *Dis =
+      static_cast<const RISCVDisassembler *>(Decoder);
+  const MCRegisterInfo *RI = Dis->getContext().getRegisterInfo();
+  MCRegister Reg =
+      RI->getMatchingSuperReg(RISCV::V0 + RegNo, RISCV::sub_vrm1_0,
+                              &RISCVMCRegisterClasses[RISCV::VRM2RegClassID]);
+
+  Inst.addOperand(MCOperand::createReg(Reg));
+  return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeVRM4RegisterClass(MCInst &Inst, uint64_t RegNo,
+                                            uint64_t Address,
+                                            const void *Decoder) {
+  if (RegNo >= 32)
+    return MCDisassembler::Fail;
+
+  if (RegNo % 4)
+    return MCDisassembler::Fail;
+
+  const RISCVDisassembler *Dis =
+      static_cast<const RISCVDisassembler *>(Decoder);
+  const MCRegisterInfo *RI = Dis->getContext().getRegisterInfo();
+  MCRegister Reg =
+      RI->getMatchingSuperReg(RISCV::V0 + RegNo, RISCV::sub_vrm1_0,
+                              &RISCVMCRegisterClasses[RISCV::VRM4RegClassID]);
+
+  Inst.addOperand(MCOperand::createReg(Reg));
+  return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeVRM8RegisterClass(MCInst &Inst, uint64_t RegNo,
+                                            uint64_t Address,
+                                            const void *Decoder) {
+  if (RegNo >= 32)
+    return MCDisassembler::Fail;
+
+  if (RegNo % 8)
+    return MCDisassembler::Fail;
+
+  const RISCVDisassembler *Dis =
+      static_cast<const RISCVDisassembler *>(Decoder);
+  const MCRegisterInfo *RI = Dis->getContext().getRegisterInfo();
+  MCRegister Reg =
+      RI->getMatchingSuperReg(RISCV::V0 + RegNo, RISCV::sub_vrm1_0,
+                              &RISCVMCRegisterClasses[RISCV::VRM8RegClassID]);
+
+  Inst.addOperand(MCOperand::createReg(Reg));
+  return MCDisassembler::Success;
+}
+
 static DecodeStatus decodeVMaskReg(MCInst &Inst, uint64_t RegNo,
                                    uint64_t Address, const void *Decoder) {
   MCRegister Reg = RISCV::NoRegister;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -110,9 +110,9 @@
                 "$vd, (${rs1}), $vs2$vm">;
 
 // vl<nf>r.v vd, (rs1)
-class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr>
+class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr, RegisterClass VRC>
     : RVInstVLU {
   let vm = 1;
   let Uses = [];
@@ -169,9 +169,9 @@
            opcodestr, "$vs3, (${rs1}), $vs2$vm">;
 
 // vs<nf>r.v vd, (rs1)
-class VWholeStore<bits<3> nf, string opcodestr>
+class VWholeStore<bits<3> nf, string opcodestr, RegisterClass VRC>
    : RVInstVSU {
  let vm = 1;
  let Uses = [];
@@ -430,11 +430,11 @@
  def _UNWD : VAMONoWd;
}

-multiclass VWholeLoad<bits<3> nf, string opcodestr> {
-  def E8_V : VWholeLoad<nf, LSWidth8, opcodestr # "e8.v">;
-  def E16_V : VWholeLoad<nf, LSWidth16, opcodestr # "e16.v">;
-  def E32_V : VWholeLoad<nf, LSWidth32, opcodestr # "e32.v">;
-  def E64_V : VWholeLoad<nf, LSWidth64, opcodestr # "e64.v">;
+multiclass VWholeLoad<bits<3> nf, string opcodestr, RegisterClass VRC> {
+  def E8_V : VWholeLoad<nf, LSWidth8, opcodestr # "e8.v", VRC>;
+  def E16_V : VWholeLoad<nf, LSWidth16, opcodestr # "e16.v", VRC>;
+  def E32_V : VWholeLoad<nf, LSWidth32, opcodestr # "e32.v", VRC>;
+  def E64_V : VWholeLoad<nf, LSWidth64, opcodestr # "e64.v", VRC>;
 }
 
 //===----------------------------------------------------------------------===//
@@ -504,19 +504,19 @@
 def VSOXEI32_V : VIndexedStore;
 def VSOXEI64_V : VIndexedStore;
 
-defm VL1R : VWholeLoad<1, "vl1r">;
-defm VL2R : VWholeLoad<2, "vl2r">;
-defm VL4R : VWholeLoad<4, "vl4r">;
-defm VL8R : VWholeLoad<8, "vl8r">;
+defm VL1R : VWholeLoad<1, "vl1r", VR>;
+defm VL2R : VWholeLoad<2, "vl2r", VRM2>;
+defm VL4R : VWholeLoad<4, "vl4r", VRM4>;
+defm VL8R : VWholeLoad<8, "vl8r", VRM8>;
 
 def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>;
-def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VR:$vd, GPR:$rs1)>;
-def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VR:$vd, GPR:$rs1)>;
-def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VR:$vd, GPR:$rs1)>;
-
-def VS1R_V : VWholeStore<1, "vs1r.v">;
-def VS2R_V : VWholeStore<2, "vs2r.v">;
-def VS4R_V : VWholeStore<4, "vs4r.v">;
-def VS8R_V : VWholeStore<8, "vs8r.v">;
+def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VRM2:$vd, GPR:$rs1)>;
+def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VRM4:$vd, GPR:$rs1)>;
+def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VRM8:$vd, GPR:$rs1)>;
+
+def VS1R_V : VWholeStore<1, "vs1r.v", VR>;
+def VS2R_V : VWholeStore<2, "vs2r.v", VRM2>;
+def VS4R_V : VWholeStore<4, "vs4r.v", VRM4>;
+def VS8R_V : VWholeStore<8, "vs8r.v", VRM8>;
 
 // Vector Single-Width Integer Add and Subtract
 defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -177,14 +177,16 @@
 defset list<VTypeInfo> AllVectors = {
   defset list<VTypeInfo> AllIntegerVectors = {
     defset list<VTypeInfo> NoGroupIntegerVectors = {
-      def VI8MF8: VTypeInfo<vint8mf8_t, vbool64_t, 8, VR, V_MF8>;
-      def VI8MF4: VTypeInfo<vint8mf4_t, vbool32_t, 8, VR, V_MF4>;
-      def VI8MF2: VTypeInfo<vint8mf2_t, vbool16_t, 8, VR, V_MF2>;
+      defset list<VTypeInfo> FractionalGroupIntegerVectors = {
+        def VI8MF8: VTypeInfo<vint8mf8_t, vbool64_t, 8, VR, V_MF8>;
+        def VI8MF4: VTypeInfo<vint8mf4_t, vbool32_t, 8, VR, V_MF4>;
+        def VI8MF2: VTypeInfo<vint8mf2_t, vbool16_t, 8, VR, V_MF2>;
+        def VI16MF4: VTypeInfo<vint16mf4_t, vbool64_t, 16, VR, V_MF4>;
+        def VI16MF2: VTypeInfo<vint16mf2_t, vbool32_t, 16, VR, V_MF2>;
+        def VI32MF2: VTypeInfo<vint32mf2_t, vbool64_t, 32, VR, V_MF2>;
+      }
       def VI8M1: VTypeInfo<vint8m1_t, vbool8_t, 8, VR, V_M1>;
-      def VI16MF4: VTypeInfo<vint16mf4_t, vbool64_t, 16, VR, V_MF4>;
-      def VI16MF2: VTypeInfo<vint16mf2_t, vbool32_t, 16, VR, V_MF2>;
       def VI16M1: VTypeInfo<vint16m1_t, vbool16_t, 16, VR, V_M1>;
-      def VI32MF2: VTypeInfo<vint32mf2_t, vbool64_t, 32, VR, V_MF2>;
       def VI32M1: VTypeInfo<vint32m1_t, vbool32_t, 32, VR, V_M1>;
      def VI64M1: VTypeInfo<vint64m1_t, vbool64_t, 64, VR, V_M1>;
    }
@@ -209,13 +211,13 @@
  defset list<VTypeInfo> AllFloatVectors = {
    defset list<VTypeInfo> NoGroupFloatVectors = {
-     def VF16MF4: VTypeInfo<vfloat16mf4_t, vbool64_t, 16, VR, V_MF4, f16, FPR16>;
-     def VF16MF2: VTypeInfo<vfloat16mf2_t, vbool32_t, 16, VR, V_MF2, f16, FPR16>;
+     defset list<VTypeInfo> FractionalGroupFloatVectors = {
+       def VF16MF4: VTypeInfo<vfloat16mf4_t, vbool64_t, 16, VR, V_MF4, f16, FPR16>;
+       def VF16MF2: VTypeInfo<vfloat16mf2_t, vbool32_t, 16, VR, V_MF2, f16, FPR16>;
+       def VF32MF2: VTypeInfo<vfloat32mf2_t, vbool64_t, 32, VR, V_MF2, f32, FPR32>;
+     }
      def VF16M1: VTypeInfo<vfloat16m1_t, vbool16_t, 16, VR, V_M1, f16, FPR16>;
-
-     def VF32MF2: VTypeInfo<vfloat32mf2_t, vbool64_t, 32, VR, V_MF2, f32, FPR32>;
      def VF32M1: VTypeInfo<vfloat32m1_t, vbool32_t, 32, VR, V_M1, f32, FPR32>;
-
      def VF64M1: VTypeInfo<vfloat64m1_t, vbool64_t, 64, VR, V_M1, f64, FPR64>;
    }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -61,6 +61,30 @@
     (store_instr reg_class:$rs2, RVVBaseAddr:$rs1, avl, sew)>;
 }
 
+multiclass VPatUSLoadStoreWholeVRSDNode<ValueType type,
+                                        int sew,
+                                        LMULInfo vlmul,
+                                        VReg reg_class>
+{
+  defvar load_instr =
+    !cond(!eq(vlmul.value, V_M1.value): !cast<Instruction>("VL1RE"#sew#"_V"),
+          !eq(vlmul.value, V_M2.value): !cast<Instruction>("VL2RE"#sew#"_V"),
+          !eq(vlmul.value, V_M4.value): !cast<Instruction>("VL4RE"#sew#"_V"),
+          !eq(vlmul.value, V_M8.value): !cast<Instruction>("VL8RE"#sew#"_V"));
+  defvar store_instr =
+    !cond(!eq(vlmul.value, V_M1.value): VS1R_V,
+          !eq(vlmul.value, V_M2.value): VS2R_V,
+          !eq(vlmul.value, V_M4.value): VS4R_V,
+          !eq(vlmul.value, V_M8.value): VS8R_V);
+
+  // Load
+  def : Pat<(type (load RVVBaseAddr:$rs1)),
+            (load_instr RVVBaseAddr:$rs1)>;
+  // Store
+  def : Pat<(store type:$rs2, RVVBaseAddr:$rs1),
+            (store_instr reg_class:$rs2, RVVBaseAddr:$rs1)>;
+}
+
 multiclass VPatUSLoadStoreMaskSDNode<MTypeInfo m>
 {
   defvar load_instr = !cast<Instruction>("PseudoVLE1_V_"#m.BX);
@@ -362,9 +386,16 @@
 let Predicates = [HasStdExtV] in {
 
 // 7.4. Vector Unit-Stride Instructions
-foreach vti = AllVectors in
+foreach vti = !listconcat(FractionalGroupIntegerVectors,
+                          FractionalGroupFloatVectors) in
   defm "" : VPatUSLoadStoreSDNode<vti.Vector, vti.SEW, vti.LMul,
                                   vti.AVL, vti.RegClass>;
+foreach vti = [VI8M1, VI16M1, VI32M1, VI64M1, VF16M1, VF32M1, VF64M1] in
+  defm "" : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.SEW, vti.LMul,
+                                         vti.RegClass>;
+foreach vti = !listconcat(GroupIntegerVectors, GroupFloatVectors) in
+  defm "" : VPatUSLoadStoreWholeVRSDNode<vti.Vector, vti.SEW, vti.LMul,
+                                         vti.RegClass>;
 foreach mti = AllMasks in
   defm "" : VPatUSLoadStoreMaskSDNode<mti>;
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -151,6 +151,34 @@
   return true;
 }
 
+static bool isRVVWholeLoadStore(unsigned Opcode) {
+  switch (Opcode) {
+  default:
+    return false;
+  case RISCV::VS1R_V:
+  case RISCV::VS2R_V:
+  case RISCV::VS4R_V:
+  case RISCV::VS8R_V:
+  case RISCV::VL1RE8_V:
+  case RISCV::VL2RE8_V:
+  case RISCV::VL4RE8_V:
+  case RISCV::VL8RE8_V:
+  case RISCV::VL1RE16_V:
+  case RISCV::VL2RE16_V:
+  case RISCV::VL4RE16_V:
+  case RISCV::VL8RE16_V:
+  case RISCV::VL1RE32_V:
+  case RISCV::VL2RE32_V:
+  case RISCV::VL4RE32_V:
+  case RISCV::VL8RE32_V:
+  case RISCV::VL1RE64_V:
+  case RISCV::VL2RE64_V:
+  case RISCV::VL4RE64_V:
+  case RISCV::VL8RE64_V:
+    return true;
+  }
+}
+
 void RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                             int SPAdj, unsigned FIOperandNum,
                                             RegScavenger *RS) const {
@@ -166,8 +194,9 @@
   Register FrameReg;
   StackOffset Offset =
       getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
-  const auto *RVVInfo = RISCVVPseudosTable::getPseudoInfo(MI.getOpcode());
-  if (!RVVInfo)
+  bool isRVV = RISCVVPseudosTable::getPseudoInfo(MI.getOpcode()) ||
+               isRVVWholeLoadStore(MI.getOpcode());
+  if (!isRVV)
     Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
 
   if (!isInt<32>(Offset.getFixed())) {
@@ -195,7 +224,7 @@
     // Offset = (fixed offset, 0)
     MI.getOperand(FIOperandNum)
        .ChangeToRegister(FrameReg, false, false, FrameRegIsKill);
-    if (!RVVInfo)
+    if (!isRVV)
       MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
     else {
       if (Offset.getFixed()) {
@@ -226,7 +255,7 @@
         .addReg(FrameReg, getKillRegState(FrameRegIsKill))
         .addReg(FactorRegister, RegState::Kill);
 
-    if (RVVInfo && Offset.getFixed()) {
+    if (isRVV && Offset.getFixed()) {
       // Scalable load/store has no immediate argument. We need to add the
       // fixed part into the load/store base address.
       BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), VL)
@@ -236,7 +265,7 @@
     // 3. Replace address register with calculated address register
     MI.getOperand(FIOperandNum).ChangeToRegister(VL, false, false, true);
-    if (!RVVInfo)
+    if (!isRVV)
       MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset.getFixed());
   }
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
--- a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
@@ -7,9 +7,8 @@
 ; RV64IV:       # %bb.0:
 ; RV64IV-NEXT:    addi sp, sp, -528
 ; RV64IV-NEXT:    .cfi_def_cfa_offset 528
-; RV64IV-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
 ; RV64IV-NEXT:    addi a1, sp, 8
-; RV64IV-NEXT:    vle64.v v8, (a1)
+; RV64IV-NEXT:    vl1re64.v v8, (a1)
 ; RV64IV-NEXT:    ld a1, 520(sp)
 ; RV64IV-NEXT:    sd a1, 0(a0)
 ; RV64IV-NEXT:    addi sp, sp, 528
@@ -35,15 +34,14 @@
 ; RV64IV-NEXT:    .cfi_def_cfa_offset 528
 ; RV64IV-NEXT:    csrr a0, vlenb
 ; RV64IV-NEXT:    sub sp, sp, a0
-; RV64IV-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
 ; RV64IV-NEXT:    csrr a0, vlenb
 ; RV64IV-NEXT:    add a0, sp, a0
 ; RV64IV-NEXT:    addi a0, a0, 8
-; RV64IV-NEXT:    vle64.v v25, (a0)
+; RV64IV-NEXT:    vl1re64.v v25, (a0)
 ; RV64IV-NEXT:    csrr a0, vlenb
 ; RV64IV-NEXT:    add a0, sp, a0
 ; RV64IV-NEXT:    ld a0, 520(a0)
-; RV64IV-NEXT:    vle64.v v26, (sp)
+; RV64IV-NEXT:    vl1re64.v v26, (sp)
 ; RV64IV-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
 ; RV64IV-NEXT:    vadd.vv v8, v25, v26
 ; RV64IV-NEXT:    csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
--- a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll
@@ -20,16 +20,13 @@
   ret void
 }
 
-; PRE-INSERTER: %3:vr = PseudoVLE64_V_M1 %1, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
-; PRE-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8)
+; PRE-INSERTER: %3:vr = VL1RE64_V %1 :: (load unknown-size from %ir.pa, align 8)
+; PRE-INSERTER: %4:vr = VL1RE64_V %2 :: (load unknown-size from %ir.pb, align 8)
 ; PRE-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $x0, 64, implicit $vl, implicit $vtype
-; PRE-INSERTER: PseudoVSE64_V_M1 killed %5, %0, $x0, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
+; PRE-INSERTER: VS1R_V killed %5, %0 :: (store unknown-size into %ir.pc, align 8)
 
+; POST-INSERTER: %3:vr = VL1RE64_V %1 :: (load unknown-size from %ir.pa, align 8)
+; POST-INSERTER: %4:vr = VL1RE64_V %2 :: (load unknown-size from %ir.pb, align 8)
 ; POST-INSERTER: dead %6:gpr = PseudoVSETVLI $x0, 88, implicit-def $vl, implicit-def $vtype
-; POST-INSERTER: %3:vr = PseudoVLE64_V_M1 %1, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8)
-; POST-INSERTER: dead %7:gpr = PseudoVSETVLI $x0, 88, implicit-def $vl, implicit-def $vtype
-; POST-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $noreg, -1, implicit $vl,
implicit $vtype :: (load unknown-size from %ir.pb, align 8) -; POST-INSERTER: dead %8:gpr = PseudoVSETVLI $x0, 88, implicit-def $vl, implicit-def $vtype ; POST-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $noreg, -1, implicit $vl, implicit $vtype -; POST-INSERTER: dead %9:gpr = PseudoVSETVLI $x0, 88, implicit-def $vl, implicit-def $vtype -; POST-INSERTER: PseudoVSE64_V_M1 killed %5, %0, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8) +; POST-INSERTER: VS1R_V killed %5, %0 :: (store unknown-size into %ir.pc, align 8) diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll --- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll @@ -7,11 +7,11 @@ define void @vadd_vint16m1( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint16m1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e16,m1,ta,mu -; CHECK-NEXT: vle16.v v25, (a1) -; CHECK-NEXT: vle16.v v26, (a2) +; CHECK-NEXT: vl1re16.v v25, (a1) +; CHECK-NEXT: vl1re16.v v26, (a2) +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu ; CHECK-NEXT: vadd.vv v25, v25, v26 -; CHECK-NEXT: vse16.v v25, (a0) +; CHECK-NEXT: vs1r.v v25, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -23,11 +23,11 @@ define void @vadd_vint16m2( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint16m2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu -; CHECK-NEXT: vle16.v v26, (a1) -; CHECK-NEXT: vle16.v v28, (a2) +; CHECK-NEXT: vl2re16.v v26, (a1) +; CHECK-NEXT: vl2re16.v v28, (a2) +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu ; CHECK-NEXT: vadd.vv v26, v26, v28 -; CHECK-NEXT: vse16.v v26, (a0) +; CHECK-NEXT: vs2r.v v26, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -39,11 +39,11 @@ define void @vadd_vint16m4( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint16m4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a1) -; CHECK-NEXT: vle16.v v8, (a2) +; CHECK-NEXT: vl4re16.v v28, (a1) +; CHECK-NEXT: vl4re16.v v8, (a2) +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu ; CHECK-NEXT: vadd.vv v28, v28, v8 -; CHECK-NEXT: vse16.v v28, (a0) +; CHECK-NEXT: vs4r.v v28, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -55,11 +55,11 @@ define void @vadd_vint16m8( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint16m8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v8, (a1) -; CHECK-NEXT: vle16.v v16, (a2) +; CHECK-NEXT: vl8re16.v v8, (a1) +; CHECK-NEXT: vl8re16.v v16, (a2) +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu ; CHECK-NEXT: vadd.vv v8, v8, v16 -; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: vs8r.v v8, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll --- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll @@ -7,11 +7,11 @@ define void @vadd_vint32m1( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint32m1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e32,m1,ta,mu -; CHECK-NEXT: vle32.v v25, (a1) -; CHECK-NEXT: vle32.v v26, (a2) +; CHECK-NEXT: vl1re32.v v25, (a1) +; CHECK-NEXT: vl1re32.v v26, (a2) +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu ; CHECK-NEXT: vadd.vv v25, v25, v26 -; CHECK-NEXT: vse32.v v25, (a0) +; CHECK-NEXT: vs1r.v v25, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * 
%pb @@ -23,11 +23,11 @@ define void @vadd_vint32m2( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint32m2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e32,m2,ta,mu -; CHECK-NEXT: vle32.v v26, (a1) -; CHECK-NEXT: vle32.v v28, (a2) +; CHECK-NEXT: vl2re32.v v26, (a1) +; CHECK-NEXT: vl2re32.v v28, (a2) +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu ; CHECK-NEXT: vadd.vv v26, v26, v28 -; CHECK-NEXT: vse32.v v26, (a0) +; CHECK-NEXT: vs2r.v v26, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -39,11 +39,11 @@ define void @vadd_vint32m4( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint32m4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu -; CHECK-NEXT: vle32.v v28, (a1) -; CHECK-NEXT: vle32.v v8, (a2) +; CHECK-NEXT: vl4re32.v v28, (a1) +; CHECK-NEXT: vl4re32.v v8, (a2) +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu ; CHECK-NEXT: vadd.vv v28, v28, v8 -; CHECK-NEXT: vse32.v v28, (a0) +; CHECK-NEXT: vs4r.v v28, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -55,11 +55,11 @@ define void @vadd_vint32m8( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint32m8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v8, (a1) -; CHECK-NEXT: vle32.v v16, (a2) +; CHECK-NEXT: vl8re32.v v8, (a1) +; CHECK-NEXT: vl8re32.v v16, (a2) +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu ; CHECK-NEXT: vadd.vv v8, v8, v16 -; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: vs8r.v v8, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll --- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll @@ -7,11 +7,11 @@ define void @vadd_vint64m1( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint64m1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e64,m1,ta,mu -; CHECK-NEXT: vle64.v v25, (a1) -; CHECK-NEXT: vle64.v v26, (a2) +; CHECK-NEXT: vl1re64.v v25, (a1) +; CHECK-NEXT: vl1re64.v v26, (a2) +; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu ; CHECK-NEXT: vadd.vv v25, v25, v26 -; CHECK-NEXT: vse64.v v25, (a0) +; CHECK-NEXT: vs1r.v v25, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -23,11 +23,11 @@ define void @vadd_vint64m2( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint64m2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e64,m2,ta,mu -; CHECK-NEXT: vle64.v v26, (a1) -; CHECK-NEXT: vle64.v v28, (a2) +; CHECK-NEXT: vl2re64.v v26, (a1) +; CHECK-NEXT: vl2re64.v v28, (a2) +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu ; CHECK-NEXT: vadd.vv v26, v26, v28 -; CHECK-NEXT: vse64.v v26, (a0) +; CHECK-NEXT: vs2r.v v26, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -39,11 +39,11 @@ define void @vadd_vint64m4( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint64m4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e64,m4,ta,mu -; CHECK-NEXT: vle64.v v28, (a1) -; CHECK-NEXT: vle64.v v8, (a2) +; CHECK-NEXT: vl4re64.v v28, (a1) +; CHECK-NEXT: vl4re64.v v8, (a2) +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu ; CHECK-NEXT: vadd.vv v28, v28, v8 -; CHECK-NEXT: vse64.v v28, (a0) +; CHECK-NEXT: vs4r.v v28, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -55,11 +55,11 @@ define void @vadd_vint64m8( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint64m8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v8, (a1) -; CHECK-NEXT: vle64.v v16, (a2) +; CHECK-NEXT: vl8re64.v v8, (a1) +; 
CHECK-NEXT: vl8re64.v v16, (a2) +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu ; CHECK-NEXT: vadd.vv v8, v8, v16 -; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: vs8r.v v8, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll --- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll +++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll @@ -7,11 +7,11 @@ define void @vadd_vint8m1( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint8m1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu -; CHECK-NEXT: vle8.v v25, (a1) -; CHECK-NEXT: vle8.v v26, (a2) +; CHECK-NEXT: vl1r.v v25, (a1) +; CHECK-NEXT: vl1r.v v26, (a2) +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu ; CHECK-NEXT: vadd.vv v25, v25, v26 -; CHECK-NEXT: vse8.v v25, (a0) +; CHECK-NEXT: vs1r.v v25, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -23,11 +23,11 @@ define void @vadd_vint8m2( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint8m2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu -; CHECK-NEXT: vle8.v v26, (a1) -; CHECK-NEXT: vle8.v v28, (a2) +; CHECK-NEXT: vl2r.v v26, (a1) +; CHECK-NEXT: vl2r.v v28, (a2) +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu ; CHECK-NEXT: vadd.vv v26, v26, v28 -; CHECK-NEXT: vse8.v v26, (a0) +; CHECK-NEXT: vs2r.v v26, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -39,11 +39,11 @@ define void @vadd_vint8m4( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint8m4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu -; CHECK-NEXT: vle8.v v28, (a1) -; CHECK-NEXT: vle8.v v8, (a2) +; CHECK-NEXT: vl4r.v v28, (a1) +; CHECK-NEXT: vl4r.v v8, (a2) +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu ; CHECK-NEXT: vadd.vv v28, v28, v8 -; CHECK-NEXT: vse8.v v28, (a0) +; CHECK-NEXT: vs4r.v v28, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -55,11 +55,11 @@ define void @vadd_vint8m8( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint8m8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v8, (a1) -; CHECK-NEXT: vle8.v v16, (a2) +; CHECK-NEXT: vl8r.v v8, (a1) +; CHECK-NEXT: vl8r.v v16, (a2) +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu ; CHECK-NEXT: vadd.vv v8, v8, v16 -; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: vs8r.v v8, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb diff --git a/llvm/test/CodeGen/RISCV/rvv/localvar.ll b/llvm/test/CodeGen/RISCV/rvv/localvar.ll --- a/llvm/test/CodeGen/RISCV/rvv/localvar.ll +++ b/llvm/test/CodeGen/RISCV/rvv/localvar.ll @@ -32,11 +32,10 @@ ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 1 ; RV64IV-NEXT: sub sp, sp, a0 -; RV64IV-NEXT: vsetvli a0, zero, e8,m1,ta,mu ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: add a0, sp, a0 -; RV64IV-NEXT: vle8.v v25, (a0) -; RV64IV-NEXT: vle8.v v25, (sp) +; RV64IV-NEXT: vl1r.v v25, (a0) +; RV64IV-NEXT: vl1r.v v25, (sp) ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 1 ; RV64IV-NEXT: add sp, sp, a0 @@ -55,12 +54,11 @@ ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 2 ; RV64IV-NEXT: sub sp, sp, a0 -; RV64IV-NEXT: vsetvli a0, zero, e8,m2,ta,mu ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 1 ; RV64IV-NEXT: add a0, sp, a0 -; RV64IV-NEXT: vle8.v v26, (a0) -; RV64IV-NEXT: vle8.v v26, (sp) +; RV64IV-NEXT: vl2r.v v26, (a0) +; RV64IV-NEXT: vl2r.v v26, (sp) ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 2 ; RV64IV-NEXT: add sp, sp, a0 @@ -87,12 +85,11 @@ ; RV64IV-NEXT: 
csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 3 ; RV64IV-NEXT: sub sp, sp, a0 -; RV64IV-NEXT: vsetvli a0, zero, e8,m4,ta,mu ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 2 ; RV64IV-NEXT: add a0, sp, a0 -; RV64IV-NEXT: vle8.v v28, (a0) -; RV64IV-NEXT: vle8.v v28, (sp) +; RV64IV-NEXT: vl4r.v v28, (a0) +; RV64IV-NEXT: vl4r.v v28, (sp) ; RV64IV-NEXT: addi sp, s0, -32 ; RV64IV-NEXT: ld s0, 16(sp) # 8-byte Folded Reload ; RV64IV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload @@ -120,12 +117,11 @@ ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 4 ; RV64IV-NEXT: sub sp, sp, a0 -; RV64IV-NEXT: vsetvli a0, zero, e8,m8,ta,mu ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 3 ; RV64IV-NEXT: add a0, sp, a0 -; RV64IV-NEXT: vle8.v v8, (a0) -; RV64IV-NEXT: vle8.v v8, (sp) +; RV64IV-NEXT: vl8r.v v8, (a0) +; RV64IV-NEXT: vl8r.v v8, (sp) ; RV64IV-NEXT: addi sp, s0, -64 ; RV64IV-NEXT: ld s0, 48(sp) # 8-byte Folded Reload ; RV64IV-NEXT: ld ra, 56(sp) # 8-byte Folded Reload @@ -150,12 +146,11 @@ ; RV64IV-NEXT: slli a0, a0, 2 ; RV64IV-NEXT: add a0, sp, a0 ; RV64IV-NEXT: lw a0, 12(a0) -; RV64IV-NEXT: vsetvli a0, zero, e8,m2,ta,mu ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 1 ; RV64IV-NEXT: add a0, sp, a0 -; RV64IV-NEXT: vle8.v v26, (a0) -; RV64IV-NEXT: vle8.v v26, (sp) +; RV64IV-NEXT: vl2r.v v26, (a0) +; RV64IV-NEXT: vl2r.v v26, (sp) ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 2 ; RV64IV-NEXT: add a0, sp, a0 @@ -199,17 +194,16 @@ ; RV64IV-NEXT: sub a1, s0, a1 ; RV64IV-NEXT: addi a1, a1, -16 ; RV64IV-NEXT: call notdead@plt -; RV64IV-NEXT: vsetvli a0, zero, e8,m2,ta,mu ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 1 ; RV64IV-NEXT: sub a0, s0, a0 ; RV64IV-NEXT: addi a0, a0, -16 -; RV64IV-NEXT: vle8.v v26, (a0) +; RV64IV-NEXT: vl2r.v v26, (a0) ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 2 ; RV64IV-NEXT: sub a0, s0, a0 ; RV64IV-NEXT: addi a0, a0, -16 -; RV64IV-NEXT: vle8.v v26, (a0) +; RV64IV-NEXT: vl2r.v v26, (a0) ; RV64IV-NEXT: addi sp, s0, -16 ; RV64IV-NEXT: ld s0, 0(sp) # 8-byte Folded Reload ; RV64IV-NEXT: ld ra, 8(sp) # 8-byte Folded Reload @@ -253,15 +247,14 @@ ; RV64IV-NEXT: mv a2, a2 ; RV64IV-NEXT: call notdead2@plt ; RV64IV-NEXT: lw a0, 124(s1) -; RV64IV-NEXT: vsetvli a0, zero, e8,m2,ta,mu ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 1 ; RV64IV-NEXT: sub a0, s1, a0 -; RV64IV-NEXT: vle8.v v26, (a0) +; RV64IV-NEXT: vl2r.v v26, (a0) ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 2 ; RV64IV-NEXT: sub a0, s1, a0 -; RV64IV-NEXT: vle8.v v26, (a0) +; RV64IV-NEXT: vl2r.v v26, (a0) ; RV64IV-NEXT: lw a0, 120(s1) ; RV64IV-NEXT: addi sp, s0, -256 ; RV64IV-NEXT: ld s1, 232(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll --- a/llvm/test/CodeGen/RISCV/rvv/memory-args.ll +++ b/llvm/test/CodeGen/RISCV/rvv/memory-args.ll @@ -11,8 +11,7 @@ define @callee( %arg0, %arg1, %arg2) { ; RV64IV-LABEL: callee: ; RV64IV: # %bb.0: -; RV64IV-NEXT: vsetvli a1, zero, e8,m8,ta,mu -; RV64IV-NEXT: vle8.v v24, (a0) +; RV64IV-NEXT: vl8r.v v24, (a0) ; RV64IV-NEXT: addi a0, zero, 1024 ; RV64IV-NEXT: vsetvli a0, a0, e8,m8,tu,mu ; RV64IV-NEXT: vmacc.vv v8, v16, v24 @@ -39,22 +38,21 @@ ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 5 ; RV64IV-NEXT: sub sp, sp, a0 -; RV64IV-NEXT: vsetvli a0, zero, e8,m8,ta,mu ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: addi a1, zero, 24 ; RV64IV-NEXT: mul a0, a0, a1 ; RV64IV-NEXT: add a0, sp, a0 -; RV64IV-NEXT: vle8.v v8, (a0) 
+; RV64IV-NEXT: vl8r.v v8, (a0) ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 4 ; RV64IV-NEXT: add a0, sp, a0 -; RV64IV-NEXT: vle8.v v16, (a0) +; RV64IV-NEXT: vl8r.v v16, (a0) ; RV64IV-NEXT: csrr a0, vlenb ; RV64IV-NEXT: slli a0, a0, 3 ; RV64IV-NEXT: add a0, sp, a0 -; RV64IV-NEXT: vle8.v v24, (a0) +; RV64IV-NEXT: vl8r.v v24, (a0) ; RV64IV-NEXT: mv a0, sp -; RV64IV-NEXT: vse8.v v24, (sp) +; RV64IV-NEXT: vs8r.v v24, (sp) ; RV64IV-NEXT: call callee@plt ; RV64IV-NEXT: addi sp, s0, -64 ; RV64IV-NEXT: ld s0, 48(sp) # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) 
@@ -961,8 +958,7 @@ define @intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, 
%3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, 
zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: 
vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll 
b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll 
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, 
i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll @@ -251,8 +251,7 @@ define @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -473,8 +472,7 @@
 define @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -651,8 +649,7 @@
 define @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll
@@ -252,8 +252,7 @@
 define @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -474,8 +473,7 @@
 define @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -652,8 +650,7 @@
 define @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
@@ -251,8 +251,7 @@
 define @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -473,8 +472,7 @@
 define @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -651,8 +649,7 @@
 define @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
@@ -251,8 +251,7 @@
 define @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -473,8 +472,7 @@
 define @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -651,8 +649,7 @@
 define @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
@@ -137,8 +137,7 @@
 define @vfmadd_vv_nxv32f16( %va, %vb, %vc) {
 ; CHECK-LABEL: vfmadd_vv_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, zero, e16,m8,tu,mu
 ; CHECK-NEXT: vfmadd.vv v16, v24, v8
 ; CHECK-NEXT: vmv8r.v v8, v16
@@ -266,8 +265,7 @@
 define @vfmadd_vv_nxv16f32( %va, %vb, %vc) {
 ; CHECK-LABEL: vfmadd_vv_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, zero, e32,m8,tu,mu
 ; CHECK-NEXT: vfmadd.vv v8, v24, v16
 ; CHECK-NEXT: ret
@@ -367,8 +365,7 @@
 define @vfmadd_vv_nxv8f64( %va, %vb, %vc) {
 ; CHECK-LABEL: vfmadd_vv_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, zero, e64,m8,tu,mu
 ; CHECK-NEXT: vfmadd.vv v24, v16, v8
 ; CHECK-NEXT: vmv8r.v v8, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll
@@ -251,8 +251,7 @@
 define @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -473,8 +472,7 @@
 define @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -651,8 +649,7 @@
 define @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll
@@ -251,8 +251,7 @@
 define @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -473,8 +472,7 @@
 define @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -651,8 +649,7 @@
 define @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll
@@ -251,8 +251,7 @@
 define @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -473,8 +472,7 @@
 define @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -651,8 +649,7 @@
 define @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll
@@ -251,8 +251,7 @@
 define @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -473,8 +472,7 @@
 define @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -651,8 +649,7 @@
 define @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll
@@ -147,8 +147,7 @@
 define @vfmsub_vv_nxv32f16( %va, %vb, %vc) {
 ; CHECK-LABEL: vfmsub_vv_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, zero, e16,m8,tu,mu
 ; CHECK-NEXT: vfmsub.vv v16, v24, v8
 ; CHECK-NEXT: vmv8r.v v8, v16
@@ -286,8 +285,7 @@
 define @vfmsub_vv_nxv16f32( %va, %vb, %vc) {
 ; CHECK-LABEL: vfmsub_vv_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, zero, e32,m8,tu,mu
 ; CHECK-NEXT: vfmsub.vv v8, v24, v16
 ; CHECK-NEXT: ret
@@ -395,8 +393,7 @@
 define @vfmsub_vv_nxv8f64( %va, %vb, %vc) {
 ; CHECK-LABEL: vfmsub_vv_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, zero, e64,m8,tu,mu
 ; CHECK-NEXT: vfmsub.vv v24, v16, v8
 ; CHECK-NEXT: vmv8r.v v8, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
@@ -251,8 +251,7 @@
 define @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -473,8 +472,7 @@
 define @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -651,8 +649,7 @@
 define @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
@@ -251,8 +251,7 @@
 define @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -473,8 +472,7 @@
 define @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -651,8 +649,7 @@
 define @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll
@@ -156,8 +156,7 @@
 define @vfnmsub_vv_nxv32f16( %va, %vb, %vc) {
 ; CHECK-LABEL: vfnmsub_vv_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, zero, e16,m8,tu,mu
 ; CHECK-NEXT: vfnmadd.vv v8, v24, v16
 ; CHECK-NEXT: ret
@@ -302,8 +301,7 @@
 define @vfnmsub_vv_nxv16f32( %va, %vb, %vc) {
 ; CHECK-LABEL: vfnmsub_vv_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, zero, e32,m8,tu,mu
 ; CHECK-NEXT: vfnmadd.vv v24, v8, v16
 ; CHECK-NEXT: vmv8r.v v8, v24
@@ -419,8 +417,7 @@
 define @vfnmsub_vv_nxv8f64( %va, %vb, %vc) {
 ; CHECK-LABEL: vfnmsub_vv_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, zero, e64,m8,tu,mu
 ; CHECK-NEXT: vfnmadd.vv v24, v16, v8
 ; CHECK-NEXT: vmv8r.v v8, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll
@@ -146,8 +146,7 @@
 define @vfnmsub_vv_nxv32f16( %va, %vb, %vc) {
 ; CHECK-LABEL: vfnmsub_vv_nxv32f16:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, zero, e16,m8,tu,mu
 ; CHECK-NEXT: vfnmsub.vv v8, v24, v16
 ; CHECK-NEXT: ret
@@ -282,8 +281,7 @@
 define @vfnmsub_vv_nxv16f32( %va, %vb, %vc) {
 ; CHECK-LABEL: vfnmsub_vv_nxv16f32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, zero, e32,m8,tu,mu
 ; CHECK-NEXT: vfnmsub.vv v24, v8, v16
 ; CHECK-NEXT: vmv8r.v v8, v24
@@ -391,8 +389,7 @@
 define @vfnmsub_vv_nxv8f64( %va, %vb, %vc) {
 ; CHECK-LABEL: vfnmsub_vv_nxv8f64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, zero, e64,m8,tu,mu
 ; CHECK-NEXT: vfnmsub.vv v24, v16, v8
 ; CHECK-NEXT: vmv8r.v v8, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
@@ -251,8 +251,7 @@
 define @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -473,8 +472,7 @@
 define @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -651,8 +649,7 @@
 define @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll
@@ -251,8 +251,7 @@
 define @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -473,8 +472,7 @@
 define @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -651,8 +649,7 @@
 define @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
@@ -251,8 +251,7 @@
 define @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -473,8 +472,7 @@
 define @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -651,8 +649,7 @@
 define @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll
@@ -251,8 +251,7 @@
 define @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -473,8 +472,7 @@
 define @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -651,8 +649,7 @@
 define @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
@@ -251,8 +251,7 @@
 define @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -473,8 +472,7 @@
 define @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -651,8 +649,7 @@
 define @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll
@@ -251,8 +251,7 @@
 define @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -473,8 +472,7 @@
 define @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -651,8 +649,7 @@
 define @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
@@ -251,8 +251,7 @@
 define @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -473,8 +472,7 @@
 define @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -651,8 +649,7 @@
 define @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll
@@ -252,8 +252,7 @@
 define @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -474,8 +473,7 @@
 define @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -652,8 +650,7 @@
 define @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
@@ -212,8 +212,7 @@
 define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT: vfwadd.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -394,8 +393,7 @@
 define @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
-; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vl4re32.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT: vfwadd.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
@@ -212,8 +212,7 @@
 define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT: vfwadd.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -394,8 +393,7 @@
 define @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
-; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vl4re32.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT: vfwadd.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
@@ -212,8 +212,7 @@
 define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT: vfwsub.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -394,8 +393,7 @@
 define @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
-; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vl4re32.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT: vfwsub.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
@@ -212,8 +212,7 @@
 define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT: vfwsub.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -394,8 +393,7 @@
 define @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
-; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vl4re32.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT: vfwsub.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll
@@ -295,8 +295,7 @@
 define @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll
@@ -295,8 +295,7 @@
 define @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -961,8 +958,7 @@
 define @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll
@@ -295,8 +295,7 @@
 define @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll
@@ -295,8 +295,7 @@
 define @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -961,8 +958,7 @@
 define @intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll
@@ -295,8 +295,7 @@
 define @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll
@@ -295,8 +295,7 @@
 define @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -961,8 +958,7 @@
 define @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll
@@ -295,8 +295,7 @@
 define @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll
@@ -295,8 +295,7 @@
 define @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -961,8 +958,7 @@
 define @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll
@@ -295,8 +295,7 @@
 define @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll
@@ -295,8 +295,7 @@
 define @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -961,8 +958,7 @@
 define @intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll
@@ -295,8 +295,7 @@
 define @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll
@@ -295,8 +295,7 @@
 define @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -961,8 +958,7 @@
 define @intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll
@@ -295,8 +295,7 @@
 define @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll
@@ -295,8 +295,7 @@
 define @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -961,8 +958,7 @@
 define @intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll
@@ -295,8 +295,7 @@
 define @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll
@@ -295,8 +295,7 @@
 define @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -961,8 +958,7 @@
 define @intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll
@@ -295,8 +295,7 @@
 define @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll
@@ -295,8 +295,7 @@
 define @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -961,8 +958,7 @@
 define @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll
@@ -295,8 +295,7 @@
 define @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t
 ;
CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll @@ -302,8 +302,7 @@ define @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -574,8 +573,7 @@ define @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # 
%bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -801,8 +799,7 @@ define @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -1073,8 +1070,7 @@ define @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -1300,8 +1296,7 @@ define @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -1482,8 +1477,7 @@ define @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll @@ -302,8 +302,7 @@ define @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -574,8 +573,7 @@ define @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -801,8 +799,7 @@ define @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; 
CHECK-NEXT: jalr zero, 0(ra) @@ -983,8 +980,7 @@ define @intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -1255,8 +1251,7 @@ define @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -1482,8 +1477,7 @@ define @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -1664,8 +1658,7 @@ define @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll @@ -527,8 +527,7 @@ define @intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -709,8 +708,7 @@ define @intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -981,8 +979,7 @@ define @intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -1163,8 +1160,7 @@ define @intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra)
@@ -1255,8 +1251,7 @@ define <vscale x 8 x double> @intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m2,ta,mu -; CHECK-NEXT: vle16.v v26, (a0) +; CHECK-NEXT: vl2re16.v v26, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v26, v0.t ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll
@@ -527,8 +527,7 @@ define <vscale x 32 x i16> @intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra)
@@ -709,8 +708,7 @@ define <vscale x 16 x i32> @intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra)
@@ -801,8 +799,7 @@ define <vscale x 8 x i64> @intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m2,ta,mu -; CHECK-NEXT: vle16.v v26, (a0) +; CHECK-NEXT: vl2re16.v v26, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v26, v0.t ; CHECK-NEXT: jalr zero, 0(ra)
@@ -1073,8 +1070,7 @@ define <vscale x 32 x half> @intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra)
@@ -1255,8 +1251,7 @@ define <vscale x 16 x float> @intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra)
@@ -1347,8 +1342,7 @@ define <vscale x 8 x double> @intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli
a2, zero, e16,m2,ta,mu -; CHECK-NEXT: vle16.v v26, (a0) +; CHECK-NEXT: vl2re16.v v26, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v26, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: 
vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: 
jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll @@ -295,8 +295,7 @@ define 
@intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; 
CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ 
-561,8 +560,7 @@ define @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; 
CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll @@ -257,8 +257,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu -; CHECK-NEXT: vle8.v v28, (a0) +; CHECK-NEXT: vl4re8.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu ; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -484,8 +483,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
@@ -257,8 +257,7 @@
 define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu
-; CHECK-NEXT: vle8.v v28, (a0)
+; CHECK-NEXT: vl4re8.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -484,8 +483,7 @@
 define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -666,8 +664,7 @@
 define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
-; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vl4re32.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
@@ -257,8 +257,7 @@
 define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu
-; CHECK-NEXT: vle8.v v28, (a0)
+; CHECK-NEXT: vl4re8.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -484,8 +483,7 @@
 define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
@@ -257,8 +257,7 @@
 define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu
-; CHECK-NEXT: vle8.v v28, (a0)
+; CHECK-NEXT: vl4re8.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -484,8 +483,7 @@
 define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -666,8 +664,7 @@
 define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
-; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vl4re32.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
@@ -257,8 +257,7 @@
 define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu
-; CHECK-NEXT: vle8.v v28, (a0)
+; CHECK-NEXT: vl4re8.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -484,8 +483,7 @@
 define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
@@ -257,8 +257,7 @@
 define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu
-; CHECK-NEXT: vle8.v v28, (a0)
+; CHECK-NEXT: vl4re8.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -484,8 +483,7 @@
 define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -666,8 +664,7 @@
 define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
-; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vl4re32.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
@@ -257,8 +257,7 @@
 define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu
-; CHECK-NEXT: vle8.v v28, (a0)
+; CHECK-NEXT: vl4re8.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -484,8 +483,7 @@
 define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
@@ -257,8 +257,7 @@
 define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu
-; CHECK-NEXT: vle8.v v28, (a0)
+; CHECK-NEXT: vl4re8.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu
 ; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -484,8 +483,7 @@
 define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
-; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vl4re16.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
 ; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -666,8 +664,7 @@
 define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
-; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vl4re32.v v28, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
 ; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll
@@ -295,8 +295,7 @@
 define <vscale x 64 x i8> @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define <vscale x 32 x i16> @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define <vscale x 16 x i32> @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll
@@ -295,8 +295,7 @@
 define <vscale x 64 x i8> @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
-; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vl8re8.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu
 ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -561,8 +560,7 @@
 define <vscale x 32 x i16> @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
-; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vl8re16.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu
 ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -783,8 +781,7 @@
 define <vscale x 16 x i32> @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
-; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu
 ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
@@ -961,8 +958,7 @@
 define <vscale x 8 x i64> @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vl8re64.v v24, (a0)
 ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu
 ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t
 ; CHECK-NEXT: jalr zero, 0(ra)
diff --git a/llvm/test/CodeGen/RISCV/scalable-vector-struct.ll b/llvm/test/CodeGen/RISCV/scalable-vector-struct.ll
--- a/llvm/test/CodeGen/RISCV/scalable-vector-struct.ll
+++ b/llvm/test/CodeGen/RISCV/scalable-vector-struct.ll
@@ -7,9 +7,8 @@
 define i32 @foo({ {<vscale x 2 x i32>, <vscale x 2 x i32>}, i32 } %x, <vscale x 2 x i32>* %y, <vscale x 2 x i32>* %z) {
 ; CHECK-LABEL: foo:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a3, zero, e32,m1,ta,mu
-; CHECK-NEXT: vse32.v v8, (a1)
-; CHECK-NEXT: vse32.v v9, (a2)
+; CHECK-NEXT: vs1r.v v8, (a1)
+; CHECK-NEXT: vs1r.v v9, (a2)
 ; CHECK-NEXT: ret
 entry:
   br label %return
diff --git a/llvm/test/MC/RISCV/rvv/invalid.s b/llvm/test/MC/RISCV/rvv/invalid.s
--- a/llvm/test/MC/RISCV/rvv/invalid.s
+++ b/llvm/test/MC/RISCV/rvv/invalid.s
@@ -589,3 +589,70 @@
 vfncvt.xu.f.w v0, v4, v0.t
 # CHECK-ERROR: The destination vector register group cannot overlap the mask register.
 # CHECK-ERROR-LABEL: vfncvt.xu.f.w v0, v4, v0.t
+
+vl2re8.v v1, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl4re8.v v1, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl4re8.v v2, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl4re8.v v3, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl8re8.v v1, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl8re8.v v2, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl8re8.v v3, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl8re8.v v4, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl8re8.v v5, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl8re8.v v6, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vl8re8.v v7, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs2r.v v1, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs4r.v v1, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs4r.v v2, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs4r.v v3, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs8r.v v1, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs8r.v v2, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs8r.v v3, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs8r.v v4, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs8r.v v5, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs8r.v v6, (a0)
+# CHECK-ERROR: invalid operand for instruction
+
+vs8r.v v7, (a0)
+# CHECK-ERROR: invalid operand for instruction
+