diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp --- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp +++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp @@ -894,6 +894,83 @@ return Reg - RISCV::F0_D + RISCV::F0_F; } +static MCRegister convertVRToVRM2(MCRegister Reg) { + switch (Reg) { + default: + llvm_unreachable("Unexpected vector registers!"); + case RISCV::V0: + return RISCV::V0M2; + case RISCV::V2: + return RISCV::V2M2; + case RISCV::V4: + return RISCV::V4M2; + case RISCV::V6: + return RISCV::V6M2; + case RISCV::V8: + return RISCV::V8M2; + case RISCV::V10: + return RISCV::V10M2; + case RISCV::V12: + return RISCV::V12M2; + case RISCV::V14: + return RISCV::V14M2; + case RISCV::V16: + return RISCV::V16M2; + case RISCV::V18: + return RISCV::V18M2; + case RISCV::V20: + return RISCV::V20M2; + case RISCV::V22: + return RISCV::V22M2; + case RISCV::V24: + return RISCV::V24M2; + case RISCV::V26: + return RISCV::V26M2; + case RISCV::V28: + return RISCV::V28M2; + case RISCV::V30: + return RISCV::V30M2; + } +} + +static MCRegister convertVRToVRM4(MCRegister Reg) { + switch (Reg) { + default: + llvm_unreachable("Unexpected vector registers!"); + case RISCV::V0: + return RISCV::V0M4; + case RISCV::V4: + return RISCV::V4M4; + case RISCV::V8: + return RISCV::V8M4; + case RISCV::V12: + return RISCV::V12M4; + case RISCV::V16: + return RISCV::V16M4; + case RISCV::V20: + return RISCV::V20M4; + case RISCV::V24: + return RISCV::V24M4; + case RISCV::V28: + return RISCV::V28M4; + } +} + +static MCRegister convertVRToVRM8(MCRegister Reg) { + switch (Reg) { + default: + llvm_unreachable("Unexpected vector registers!"); + case RISCV::V0: + return RISCV::V0M8; + case RISCV::V8: + return RISCV::V8M8; + case RISCV::V16: + return RISCV::V16M8; + case RISCV::V24: + return RISCV::V24M8; + } +} + unsigned RISCVAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp, unsigned Kind) { RISCVOperand &Op = 
static_cast(AsmOp); @@ -905,6 +982,8 @@ RISCVMCRegisterClasses[RISCV::FPR64RegClassID].contains(Reg); bool IsRegFPR64C = RISCVMCRegisterClasses[RISCV::FPR64CRegClassID].contains(Reg); + bool IsRegVR = + RISCVMCRegisterClasses[RISCV::VRRegClassID].contains(Reg); // As the parser couldn't differentiate an FPR32 from an FPR64, coerce the // register from FPR64 to FPR32 or FPR64C to FPR32C if necessary. @@ -919,6 +998,20 @@ Op.Reg.RegNum = convertFPR64ToFPR16(Reg); return Match_Success; } + // As the parser couldn't differentiate a VRM2/VRM4/VRM8 from a VR, coerce + // the register from VR to VRM2/VRM4/VRM8 if necessary. + if (IsRegVR && Kind == MCK_VRM2) { + Op.Reg.RegNum = convertVRToVRM2(Reg); + return Match_Success; + } + if (IsRegVR && Kind == MCK_VRM4) { + Op.Reg.RegNum = convertVRToVRM4(Reg); + return Match_Success; + } + if (IsRegVR && Kind == MCK_VRM8) { + Op.Reg.RegNum = convertVRToVRM8(Reg); + return Match_Success; + } return Match_InvalidOperand; } diff --git a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp --- a/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp +++ b/llvm/lib/Target/RISCV/Disassembler/RISCVDisassembler.cpp @@ -172,6 +172,144 @@ return MCDisassembler::Success; } +static DecodeStatus DecodeVRM2RegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo >= 32) + return MCDisassembler::Fail; + + if (RegNo % 2) + return MCDisassembler::Fail; + + MCRegister Reg; + switch (RegNo) { + default: + return MCDisassembler::Fail; + case 0: + Reg = RISCV::V0M2; + break; + case 2: + Reg = RISCV::V2M2; + break; + case 4: + Reg = RISCV::V4M2; + break; + case 6: + Reg = RISCV::V6M2; + break; + case 8: + Reg = RISCV::V8M2; + break; + case 10: + Reg = RISCV::V10M2; + break; + case 12: + Reg = RISCV::V12M2; + break; + case 14: + Reg = RISCV::V14M2; + break; + case 16: + Reg = RISCV::V16M2; + break; + case 18: + Reg = RISCV::V18M2; + break; + 
case 20: + Reg = RISCV::V20M2; + break; + case 22: + Reg = RISCV::V22M2; + break; + case 24: + Reg = RISCV::V24M2; + break; + case 26: + Reg = RISCV::V26M2; + break; + case 28: + Reg = RISCV::V28M2; + break; + case 30: + Reg = RISCV::V30M2; + break; + } + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeVRM4RegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo >= 32) + return MCDisassembler::Fail; + + if (RegNo % 4) + return MCDisassembler::Fail; + + MCRegister Reg; + switch (RegNo) { + default: + return MCDisassembler::Fail; + case 0: + Reg = RISCV::V0M4; + break; + case 4: + Reg = RISCV::V4M4; + break; + case 8: + Reg = RISCV::V8M4; + break; + case 12: + Reg = RISCV::V12M4; + break; + case 16: + Reg = RISCV::V16M4; + break; + case 20: + Reg = RISCV::V20M4; + break; + case 24: + Reg = RISCV::V24M4; + break; + case 28: + Reg = RISCV::V28M4; + break; + } + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeVRM8RegisterClass(MCInst &Inst, uint64_t RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo >= 32) + return MCDisassembler::Fail; + + if (RegNo % 8) + return MCDisassembler::Fail; + + MCRegister Reg; + switch (RegNo) { + default: + return MCDisassembler::Fail; + case 0: + Reg = RISCV::V0M8; + break; + case 8: + Reg = RISCV::V8M8; + break; + case 16: + Reg = RISCV::V16M8; + break; + case 24: + Reg = RISCV::V24M8; + break; + } + Inst.addOperand(MCOperand::createReg(Reg)); + return MCDisassembler::Success; +} + static DecodeStatus decodeVMaskReg(MCInst &Inst, uint64_t RegNo, uint64_t Address, const void *Decoder) { MCRegister Reg = RISCV::NoRegister; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td @@ -110,9 +110,9 @@ "$vd, (${rs1}), $vs2$vm">; // 
vlr.v vd, (rs1) -class VWholeLoad nf, RISCVWidth width, string opcodestr> +class VWholeLoad nf, RISCVWidth width, string opcodestr, RegisterClass VRC> : RVInstVLU { let vm = 1; let Uses = []; @@ -169,9 +169,9 @@ opcodestr, "$vs3, (${rs1}), $vs2$vm">; // vsr.v vd, (rs1) -class VWholeStore nf, string opcodestr> +class VWholeStore nf, string opcodestr, RegisterClass VRC> : RVInstVSU { let vm = 1; let Uses = []; @@ -430,11 +430,11 @@ def _UNWD : VAMONoWd; } -multiclass VWholeLoad nf, string opcodestr> { - def E8_V : VWholeLoad; - def E16_V : VWholeLoad; - def E32_V : VWholeLoad; - def E64_V : VWholeLoad; +multiclass VWholeLoad nf, string opcodestr, RegisterClass VRC> { + def E8_V : VWholeLoad; + def E16_V : VWholeLoad; + def E32_V : VWholeLoad; + def E64_V : VWholeLoad; } //===----------------------------------------------------------------------===// @@ -504,19 +504,19 @@ def VSOXEI32_V : VIndexedStore; def VSOXEI64_V : VIndexedStore; -defm VL1R : VWholeLoad<1, "vl1r">; -defm VL2R : VWholeLoad<2, "vl2r">; -defm VL4R : VWholeLoad<4, "vl4r">; -defm VL8R : VWholeLoad<8, "vl8r">; +defm VL1R : VWholeLoad<1, "vl1r", VR>; +defm VL2R : VWholeLoad<2, "vl2r", VRM2>; +defm VL4R : VWholeLoad<4, "vl4r", VRM4>; +defm VL8R : VWholeLoad<8, "vl8r", VRM8>; def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>; -def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VR:$vd, GPR:$rs1)>; -def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VR:$vd, GPR:$rs1)>; -def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VR:$vd, GPR:$rs1)>; - -def VS1R_V : VWholeStore<1, "vs1r.v">; -def VS2R_V : VWholeStore<2, "vs2r.v">; -def VS4R_V : VWholeStore<4, "vs4r.v">; -def VS8R_V : VWholeStore<8, "vs8r.v">; +def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VRM2:$vd, GPR:$rs1)>; +def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VRM4:$vd, GPR:$rs1)>; +def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VRM8:$vd, GPR:$rs1)>; + +def VS1R_V : VWholeStore<1, "vs1r.v", VR>; +def VS2R_V : VWholeStore<2, 
"vs2r.v", VRM2>; +def VS4R_V : VWholeStore<4, "vs4r.v", VRM4>; +def VS8R_V : VWholeStore<8, "vs8r.v", VRM8>; // Vector Single-Width Integer Add and Subtract defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -177,14 +177,16 @@ defset list AllVectors = { defset list AllIntegerVectors = { defset list NoGroupIntegerVectors = { + defset list FractionalGroupIntegerVectors = { def VI8MF8: VTypeInfo; def VI8MF4: VTypeInfo; def VI8MF2: VTypeInfo; - def VI8M1: VTypeInfo; def VI16MF4: VTypeInfo; def VI16MF2: VTypeInfo; - def VI16M1: VTypeInfo; def VI32MF2: VTypeInfo; + } + def VI8M1: VTypeInfo; + def VI16M1: VTypeInfo; def VI32M1: VTypeInfo; def VI64M1: VTypeInfo; } @@ -209,13 +211,13 @@ defset list AllFloatVectors = { defset list NoGroupFloatVectors = { + defset list FractionalGroupFloatVectors = { def VF16MF4: VTypeInfo; def VF16MF2: VTypeInfo; - def VF16M1: VTypeInfo; - def VF32MF2: VTypeInfo; + } + def VF16M1: VTypeInfo; def VF32M1: VTypeInfo; - def VF64M1: VTypeInfo; } diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td @@ -61,6 +61,30 @@ (store_instr reg_class:$rs2, RVVBaseAddr:$rs1, avl, sew)>; } +multiclass VPatUSLoadStoreWholeVRSDNode +{ + defvar load_instr = + !cond(!eq(vlmul.value, V_M1.value): !cast("VL1RE"#sew#"_V"), + !eq(vlmul.value, V_M2.value): !cast("VL2RE"#sew#"_V"), + !eq(vlmul.value, V_M4.value): !cast("VL4RE"#sew#"_V"), + !eq(vlmul.value, V_M8.value): !cast("VL8RE"#sew#"_V")); + defvar store_instr = + !cond(!eq(vlmul.value, V_M1.value): VS1R_V, + !eq(vlmul.value, V_M2.value): VS2R_V, + !eq(vlmul.value, V_M4.value): VS4R_V, + !eq(vlmul.value, V_M8.value): VS8R_V); 
+ + // Load + def : Pat<(type (load RVVBaseAddr:$rs1)), + (load_instr RVVBaseAddr:$rs1)>; + // Store + def : Pat<(store type:$rs2, RVVBaseAddr:$rs1), + (store_instr reg_class:$rs2, RVVBaseAddr:$rs1)>; +} + multiclass VPatUSLoadStoreMaskSDNode { defvar load_instr = !cast("PseudoVLE1_V_"#m.BX); @@ -362,9 +386,16 @@ let Predicates = [HasStdExtV] in { // 7.4. Vector Unit-Stride Instructions -foreach vti = AllVectors in +foreach vti = !listconcat(FractionalGroupIntegerVectors, + FractionalGroupFloatVectors) in defm "" : VPatUSLoadStoreSDNode; +foreach vti = [VI8M1, VI16M1, VI32M1, VI64M1, VF16M1, VF32M1, VF64M1] in + defm "" : VPatUSLoadStoreWholeVRSDNode; +foreach vti = !listconcat(GroupIntegerVectors, GroupFloatVectors) in + defm "" : VPatUSLoadStoreWholeVRSDNode; foreach mti = AllMasks in defm "" : VPatUSLoadStoreMaskSDNode; diff --git a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll --- a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll +++ b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll @@ -20,16 +20,13 @@ ret void } -; PRE-INSERTER: %3:vr = PseudoVLE64_V_M1 %1, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8) -; PRE-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $x0, 64, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8) +; PRE-INSERTER: %3:vr = VL1RE64_V %1 :: (load unknown-size from %ir.pa, align 8) +; PRE-INSERTER: %4:vr = VL1RE64_V %2 :: (load unknown-size from %ir.pb, align 8) ; PRE-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $x0, 64, implicit $vl, implicit $vtype -; PRE-INSERTER: PseudoVSE64_V_M1 killed %5, %0, $x0, 64, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8) +; PRE-INSERTER: VS1R_V killed %5, %0 :: (store unknown-size into %ir.pc, align 8) +; POST-INSERTER: %3:vr = VL1RE64_V %1 :: (load unknown-size from %ir.pa, align 8) +; POST-INSERTER: %4:vr = VL1RE64_V %2 :: (load unknown-size from %ir.pb, align 8) ; 
POST-INSERTER: dead %6:gpr = PseudoVSETVLI $x0, 88, implicit-def $vl, implicit-def $vtype -; POST-INSERTER: %3:vr = PseudoVLE64_V_M1 %1, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pa, align 8) -; POST-INSERTER: dead %7:gpr = PseudoVSETVLI $x0, 88, implicit-def $vl, implicit-def $vtype -; POST-INSERTER: %4:vr = PseudoVLE64_V_M1 %2, $noreg, -1, implicit $vl, implicit $vtype :: (load unknown-size from %ir.pb, align 8) -; POST-INSERTER: dead %8:gpr = PseudoVSETVLI $x0, 88, implicit-def $vl, implicit-def $vtype ; POST-INSERTER: %5:vr = PseudoVADD_VV_M1 killed %3, killed %4, $noreg, -1, implicit $vl, implicit $vtype -; POST-INSERTER: dead %9:gpr = PseudoVSETVLI $x0, 88, implicit-def $vl, implicit-def $vtype -; POST-INSERTER: PseudoVSE64_V_M1 killed %5, %0, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8) +; POST-INSERTER: VS1R_V killed %5, %0 :: (store unknown-size into %ir.pc, align 8) diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll --- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll +++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll @@ -7,11 +7,11 @@ define void @vadd_vint16m1( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint16m1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e16,m1,ta,mu -; CHECK-NEXT: vle16.v v25, (a1) -; CHECK-NEXT: vle16.v v26, (a2) +; CHECK-NEXT: vl1re16.v v25, (a1) +; CHECK-NEXT: vl1re16.v v26, (a2) +; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu ; CHECK-NEXT: vadd.vv v25, v25, v26 -; CHECK-NEXT: vse16.v v25, (a0) +; CHECK-NEXT: vs1r.v v25, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -23,11 +23,11 @@ define void @vadd_vint16m2( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint16m2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu -; CHECK-NEXT: vle16.v v26, (a1) -; CHECK-NEXT: vle16.v v28, (a2) +; CHECK-NEXT: vl2re16.v v26, (a1) +; CHECK-NEXT: vl2re16.v v28, (a2) 
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu ; CHECK-NEXT: vadd.vv v26, v26, v28 -; CHECK-NEXT: vse16.v v26, (a0) +; CHECK-NEXT: vs2r.v v26, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -39,11 +39,11 @@ define void @vadd_vint16m4( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint16m4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a1) -; CHECK-NEXT: vle16.v v8, (a2) +; CHECK-NEXT: vl4re16.v v28, (a1) +; CHECK-NEXT: vl4re16.v v8, (a2) +; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu ; CHECK-NEXT: vadd.vv v28, v28, v8 -; CHECK-NEXT: vse16.v v28, (a0) +; CHECK-NEXT: vs4r.v v28, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -55,11 +55,11 @@ define void @vadd_vint16m8( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint16m8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v8, (a1) -; CHECK-NEXT: vle16.v v16, (a2) +; CHECK-NEXT: vl8re16.v v8, (a1) +; CHECK-NEXT: vl8re16.v v16, (a2) +; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu ; CHECK-NEXT: vadd.vv v8, v8, v16 -; CHECK-NEXT: vse16.v v8, (a0) +; CHECK-NEXT: vs8r.v v8, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll --- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll @@ -7,11 +7,11 @@ define void @vadd_vint32m1( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint32m1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e32,m1,ta,mu -; CHECK-NEXT: vle32.v v25, (a1) -; CHECK-NEXT: vle32.v v26, (a2) +; CHECK-NEXT: vl1re32.v v25, (a1) +; CHECK-NEXT: vl1re32.v v26, (a2) +; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu ; CHECK-NEXT: vadd.vv v25, v25, v26 -; CHECK-NEXT: vse32.v v25, (a0) +; CHECK-NEXT: vs1r.v v25, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -23,11 +23,11 @@ define void @vadd_vint32m2( *%pc, 
*%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint32m2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e32,m2,ta,mu -; CHECK-NEXT: vle32.v v26, (a1) -; CHECK-NEXT: vle32.v v28, (a2) +; CHECK-NEXT: vl2re32.v v26, (a1) +; CHECK-NEXT: vl2re32.v v28, (a2) +; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu ; CHECK-NEXT: vadd.vv v26, v26, v28 -; CHECK-NEXT: vse32.v v26, (a0) +; CHECK-NEXT: vs2r.v v26, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -39,11 +39,11 @@ define void @vadd_vint32m4( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint32m4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu -; CHECK-NEXT: vle32.v v28, (a1) -; CHECK-NEXT: vle32.v v8, (a2) +; CHECK-NEXT: vl4re32.v v28, (a1) +; CHECK-NEXT: vl4re32.v v8, (a2) +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu ; CHECK-NEXT: vadd.vv v28, v28, v8 -; CHECK-NEXT: vse32.v v28, (a0) +; CHECK-NEXT: vs4r.v v28, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -55,11 +55,11 @@ define void @vadd_vint32m8( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint32m8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v8, (a1) -; CHECK-NEXT: vle32.v v16, (a2) +; CHECK-NEXT: vl8re32.v v8, (a1) +; CHECK-NEXT: vl8re32.v v16, (a2) +; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu ; CHECK-NEXT: vadd.vv v8, v8, v16 -; CHECK-NEXT: vse32.v v8, (a0) +; CHECK-NEXT: vs8r.v v8, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll --- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll @@ -7,11 +7,11 @@ define void @vadd_vint64m1( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint64m1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e64,m1,ta,mu -; CHECK-NEXT: vle64.v v25, (a1) -; CHECK-NEXT: vle64.v v26, (a2) +; CHECK-NEXT: vl1re64.v v25, (a1) +; CHECK-NEXT: vl1re64.v v26, (a2) +; 
CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu ; CHECK-NEXT: vadd.vv v25, v25, v26 -; CHECK-NEXT: vse64.v v25, (a0) +; CHECK-NEXT: vs1r.v v25, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -23,11 +23,11 @@ define void @vadd_vint64m2( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint64m2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e64,m2,ta,mu -; CHECK-NEXT: vle64.v v26, (a1) -; CHECK-NEXT: vle64.v v28, (a2) +; CHECK-NEXT: vl2re64.v v26, (a1) +; CHECK-NEXT: vl2re64.v v28, (a2) +; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu ; CHECK-NEXT: vadd.vv v26, v26, v28 -; CHECK-NEXT: vse64.v v26, (a0) +; CHECK-NEXT: vs2r.v v26, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -39,11 +39,11 @@ define void @vadd_vint64m4( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint64m4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e64,m4,ta,mu -; CHECK-NEXT: vle64.v v28, (a1) -; CHECK-NEXT: vle64.v v8, (a2) +; CHECK-NEXT: vl4re64.v v28, (a1) +; CHECK-NEXT: vl4re64.v v8, (a2) +; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu ; CHECK-NEXT: vadd.vv v28, v28, v8 -; CHECK-NEXT: vse64.v v28, (a0) +; CHECK-NEXT: vs4r.v v28, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -55,11 +55,11 @@ define void @vadd_vint64m8( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint64m8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v8, (a1) -; CHECK-NEXT: vle64.v v16, (a2) +; CHECK-NEXT: vl8re64.v v8, (a1) +; CHECK-NEXT: vl8re64.v v16, (a2) +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu ; CHECK-NEXT: vadd.vv v8, v8, v16 -; CHECK-NEXT: vse64.v v8, (a0) +; CHECK-NEXT: vs8r.v v8, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll --- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll +++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll @@ -7,11 +7,11 @@ define void @vadd_vint8m1( *%pc, *%pa, 
*%pb) nounwind { ; CHECK-LABEL: vadd_vint8m1: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu -; CHECK-NEXT: vle8.v v25, (a1) -; CHECK-NEXT: vle8.v v26, (a2) +; CHECK-NEXT: vl1r.v v25, (a1) +; CHECK-NEXT: vl1r.v v26, (a2) +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu ; CHECK-NEXT: vadd.vv v25, v25, v26 -; CHECK-NEXT: vse8.v v25, (a0) +; CHECK-NEXT: vs1r.v v25, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -23,11 +23,11 @@ define void @vadd_vint8m2( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint8m2: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu -; CHECK-NEXT: vle8.v v26, (a1) -; CHECK-NEXT: vle8.v v28, (a2) +; CHECK-NEXT: vl2r.v v26, (a1) +; CHECK-NEXT: vl2r.v v28, (a2) +; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu ; CHECK-NEXT: vadd.vv v26, v26, v28 -; CHECK-NEXT: vse8.v v26, (a0) +; CHECK-NEXT: vs2r.v v26, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -39,11 +39,11 @@ define void @vadd_vint8m4( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint8m4: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu -; CHECK-NEXT: vle8.v v28, (a1) -; CHECK-NEXT: vle8.v v8, (a2) +; CHECK-NEXT: vl4r.v v28, (a1) +; CHECK-NEXT: vl4r.v v8, (a2) +; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu ; CHECK-NEXT: vadd.vv v28, v28, v8 -; CHECK-NEXT: vse8.v v28, (a0) +; CHECK-NEXT: vs4r.v v28, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb @@ -55,11 +55,11 @@ define void @vadd_vint8m8( *%pc, *%pa, *%pb) nounwind { ; CHECK-LABEL: vadd_vint8m8: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v8, (a1) -; CHECK-NEXT: vle8.v v16, (a2) +; CHECK-NEXT: vl8r.v v8, (a1) +; CHECK-NEXT: vl8r.v v16, (a2) +; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu ; CHECK-NEXT: vadd.vv v8, v8, v16 -; CHECK-NEXT: vse8.v v8, (a0) +; CHECK-NEXT: vs8r.v v8, (a0) ; CHECK-NEXT: ret %va = load , * %pa %vb = load , * %pb diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll 
b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaadd-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; 
CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vaadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: 
jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ 
define @intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vaaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vaaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, 
%2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vand-rv64.ll @@ -295,8 
+295,7 @@ define @intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vand_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vand.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, 
%2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasub-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vasub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vasub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vasubu-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vasubu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vasubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; 
CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; 
CHECK-NEXT: vdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, 
zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vdivu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vdivu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll @@ -251,8 +251,7 @@ define @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; 
CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -473,8 +472,7 @@ define @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -651,8 +649,7 @@ define @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll @@ -252,8 +252,7 @@ define @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -474,8 +473,7 @@ define @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli 
a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -652,8 +650,7 @@ define @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll @@ -251,8 +251,7 @@ define @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -473,8 +472,7 @@ define @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -651,8 +649,7 @@ define @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vfdiv.vv v8, v16, v24, 
v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll @@ -251,8 +251,7 @@ define @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -473,8 +472,7 @@ define @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -651,8 +649,7 @@ define @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vfdiv.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll @@ -137,8 +137,7 @@ define @vfmadd_vv_nxv32f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16,m8,tu,mu ; CHECK-NEXT: vfmadd.vv 
v16, v24, v8 ; CHECK-NEXT: vmv8r.v v8, v16 @@ -266,8 +265,7 @@ define @vfmadd_vv_nxv16f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32,m8,tu,mu ; CHECK-NEXT: vfmadd.vv v8, v24, v16 ; CHECK-NEXT: ret @@ -367,8 +365,7 @@ define @vfmadd_vv_nxv8f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmadd_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64,m8,tu,mu ; CHECK-NEXT: vfmadd.vv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv32.ll @@ -251,8 +251,7 @@ define @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -473,8 +472,7 @@ define @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -651,8 +649,7 @@ define @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-rv64.ll @@ -251,8 +251,7 @@ define @intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -473,8 +472,7 @@ define @intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -651,8 +649,7 @@ define @intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv32.ll @@ -251,8 +251,7 @@ define 
@intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -473,8 +472,7 @@ define @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -651,8 +649,7 @@ define @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-rv64.ll @@ -251,8 +251,7 @@ define @intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -473,8 +472,7 @@ define @intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32( 
%0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -651,8 +649,7 @@ define @intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-sdnode.ll @@ -147,8 +147,7 @@ define @vfmsub_vv_nxv32f16( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16,m8,tu,mu ; CHECK-NEXT: vfmsub.vv v16, v24, v8 ; CHECK-NEXT: vmv8r.v v8, v16 @@ -286,8 +285,7 @@ define @vfmsub_vv_nxv16f32( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32,m8,tu,mu ; CHECK-NEXT: vfmsub.vv v8, v24, v16 ; CHECK-NEXT: ret @@ -395,8 +393,7 @@ define @vfmsub_vv_nxv8f64( %va, %vb, %vc) { ; CHECK-LABEL: vfmsub_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64,m8,tu,mu ; CHECK-NEXT: vfmsub.vv v24, v16, v8 ; 
CHECK-NEXT: vmv8r.v v8, v24 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll @@ -251,8 +251,7 @@ define @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -473,8 +472,7 @@ define @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -651,8 +649,7 @@ define @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll @@ -251,8 +251,7 @@ define @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) 
+; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -473,8 +472,7 @@ define @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -651,8 +649,7 @@ define @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-sdnode.ll @@ -156,8 +156,7 @@ define @vfnmsub_vv_nxv32f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16,m8,tu,mu ; CHECK-NEXT: vfnmadd.vv v8, v24, v16 ; CHECK-NEXT: ret @@ -302,8 +301,7 @@ define @vfnmsub_vv_nxv16f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32,m8,tu,mu ; CHECK-NEXT: vfnmadd.vv v24, v8, v16 ; CHECK-NEXT: vmv8r.v v8, v24 @@ -419,8 +417,7 @@ define @vfnmsub_vv_nxv8f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv8f64: ; CHECK: # %bb.0: 
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64,m8,tu,mu ; CHECK-NEXT: vfnmadd.vv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-sdnode.ll @@ -146,8 +146,7 @@ define @vfnmsub_vv_nxv32f16( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv32f16: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e16,m8,tu,mu ; CHECK-NEXT: vfnmsub.vv v8, v24, v16 ; CHECK-NEXT: ret @@ -282,8 +281,7 @@ define @vfnmsub_vv_nxv16f32( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv16f32: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e32,m8,tu,mu ; CHECK-NEXT: vfnmsub.vv v24, v8, v16 ; CHECK-NEXT: vmv8r.v v8, v24 @@ -391,8 +389,7 @@ define @vfnmsub_vv_nxv8f64( %va, %vb, %vc) { ; CHECK-LABEL: vfnmsub_vv_nxv8f64: ; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, zero, e64,m8,tu,mu ; CHECK-NEXT: vfnmsub.vv v24, v16, v8 ; CHECK-NEXT: vmv8r.v v8, v24 diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll @@ -251,8 +251,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; 
CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -473,8 +472,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -651,8 +649,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll @@ -251,8 +251,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -473,8 +472,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vfsgnj.vv v8, v16, 
v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -651,8 +649,7 @@ define @intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll @@ -251,8 +251,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -473,8 +472,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -651,8 +649,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff 
--git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll @@ -251,8 +251,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -473,8 +472,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -651,8 +649,7 @@ define @intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll @@ -251,8 +251,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, 
(a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -473,8 +472,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -651,8 +649,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll @@ -251,8 +251,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -473,8 +472,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; 
CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -651,8 +649,7 @@ define @intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll @@ -251,8 +251,7 @@ define @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -473,8 +472,7 @@ define @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -651,8 +649,7 @@ define @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 
0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll @@ -252,8 +252,7 @@ define @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -474,8 +473,7 @@ define @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -652,8 +650,7 @@ define @intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f64_nxv8f64_nxv8f64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll @@ -212,8 +212,7 @@ define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: 
vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -394,8 +393,7 @@ define @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu -; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vl4re32.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll @@ -212,8 +212,7 @@ define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -394,8 +393,7 @@ define @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu -; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vl4re32.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll @@ -212,8 +212,7 @@ define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, 
%1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfwsub.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -394,8 +393,7 @@ define @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu -; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vl4re32.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfwsub.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll @@ -212,8 +212,7 @@ define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vfwsub.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -394,8 +393,7 @@ define @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu -; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vl4re32.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vfwsub.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmax-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 
0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmax_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vmax.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define 
@intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, 
%1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmaxu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vmaxu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmin-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vmin.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vminu-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8( 
%0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vminu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vminu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: 
intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmul-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmul_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, 
(a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulh-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: 
vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulh_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vmulh.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; 
CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhsu-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vmulhsu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhsu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vmulhsu.vv v8, v16, 
v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmulhu-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, 
e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vmulhu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vmulhu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: 
vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vor-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vor.vv v8, v16, 
v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vor_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vor.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define 
@intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrem-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) 
nounwind { ; CHECK-LABEL: intrinsic_vrem_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vrem.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vremu-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vremu-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vremu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vremu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll @@ 
-302,8 +302,7 @@ define @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -574,8 +573,7 @@ define @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -801,8 +799,7 @@ define @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -1073,8 +1070,7 @@ define @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -1300,8 +1296,7 @@ define @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry 
-; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -1482,8 +1477,7 @@ define @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll @@ -302,8 +302,7 @@ define @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -574,8 +573,7 @@ define @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -801,8 +799,7 @@ define @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -983,8 +980,7 @@ define @intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -1255,8 +1251,7 @@ define @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -1482,8 +1477,7 @@ define @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -1664,8 +1658,7 @@ define @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 
0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv32.ll @@ -527,8 +527,7 @@ define @intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -709,8 +708,7 @@ define @intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -981,8 +979,7 @@ define @intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -1163,8 +1160,7 @@ define @intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; 
CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -1255,8 +1251,7 @@ define @intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m2,ta,mu -; CHECK-NEXT: vle16.v v26, (a0) +; CHECK-NEXT: vl2re16.v v26, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v26, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-rv64.ll @@ -527,8 +527,7 @@ define @intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -709,8 +708,7 @@ define @intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -801,8 +799,7 @@ define @intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8i64_nxv8i64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m2,ta,mu -; CHECK-NEXT: vle16.v v26, (a0) +; CHECK-NEXT: vl2re16.v 
v26, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v26, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -1073,8 +1070,7 @@ define @intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv32f16_nxv32f16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -1255,8 +1251,7 @@ define @intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv16f32_nxv16f32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -1347,8 +1342,7 @@ define @intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv8f64_nxv8f64_nxv8i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m2,ta,mu -; CHECK-NEXT: vle16.v v26, (a0) +; CHECK-NEXT: vl2re16.v v26, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vrgatherei16.vv v8, v16, v26, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; 
CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, 
a1, e16,m8,tu,mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, 
v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ 
-961,8 +958,7 @@ define @intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll 
b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vsll.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll --- 
a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; 
CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define 
@intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 
%4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vsra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32: 
; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; 
CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vsrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssra-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; 
CHECK-LABEL: intrinsic_vssra_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssra_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vssra.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssrl-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, 
e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vssrl.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: 
vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, 
e32,m8,tu,mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, 
v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: 
vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vsub.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll @@ -257,8 +257,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu -; CHECK-NEXT: vle8.v v28, (a0) +; CHECK-NEXT: vl4re8.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu ; CHECK-NEXT: vwadd.wv v8, 
v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -484,8 +483,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll @@ -257,8 +257,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu -; CHECK-NEXT: vle8.v v28, (a0) +; CHECK-NEXT: vl4re8.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu ; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -484,8 +483,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -666,8 +664,7 @@ define @intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu -; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vl4re32.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vwadd.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr 
zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll @@ -257,8 +257,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu -; CHECK-NEXT: vle8.v v28, (a0) +; CHECK-NEXT: vl4re8.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu ; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -484,8 +483,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll @@ -257,8 +257,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu -; CHECK-NEXT: vle8.v v28, (a0) +; CHECK-NEXT: vl4re8.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu ; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -484,8 +483,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, 
e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -666,8 +664,7 @@ define @intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu -; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vl4re32.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vwaddu.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll @@ -257,8 +257,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu -; CHECK-NEXT: vle8.v v28, (a0) +; CHECK-NEXT: vl4re8.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu ; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -484,8 +483,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll @@ -257,8 +257,7 @@ define 
@intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu -; CHECK-NEXT: vle8.v v28, (a0) +; CHECK-NEXT: vl4re8.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu ; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -484,8 +483,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -666,8 +664,7 @@ define @intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu -; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vl4re32.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vwsub.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll @@ -257,8 +257,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu -; CHECK-NEXT: vle8.v v28, (a0) +; CHECK-NEXT: vl4re8.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu ; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -484,8 +483,7 @@ define 
@intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll @@ -257,8 +257,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu -; CHECK-NEXT: vle8.v v28, (a0) +; CHECK-NEXT: vl4re8.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m4,tu,mu ; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -484,8 +483,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu -; CHECK-NEXT: vle16.v v28, (a0) +; CHECK-NEXT: vl4re16.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu ; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -666,8 +664,7 @@ define @intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i64_nxv8i64_nxv8i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu -; CHECK-NEXT: vle32.v v28, (a0) +; CHECK-NEXT: vl4re32.v v28, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu ; CHECK-NEXT: vwsubu.wv v8, v16, v28, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv32.ll @@ -295,8 +295,7 @@ define @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i32 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-rv64.ll @@ -295,8 +295,7 @@ define @intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv64i8_nxv64i8_nxv64i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu -; CHECK-NEXT: vle8.v v24, (a0) +; CHECK-NEXT: vl8re8.v v24, (a0) ; CHECK-NEXT: 
vsetvli a0, a1, e8,m8,tu,mu ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -561,8 +560,7 @@ define @intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv32i16_nxv32i16_nxv32i16: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu -; CHECK-NEXT: vle16.v v24, (a0) +; CHECK-NEXT: vl8re16.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e16,m8,tu,mu ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -783,8 +781,7 @@ define @intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv16i32_nxv16i32_nxv16i32: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu -; CHECK-NEXT: vle32.v v24, (a0) +; CHECK-NEXT: vl8re32.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e32,m8,tu,mu ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) @@ -961,8 +958,7 @@ define @intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vxor_mask_vv_nxv8i64_nxv8i64_nxv8i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu -; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vl8re64.v v24, (a0) ; CHECK-NEXT: vsetvli a0, a1, e64,m8,tu,mu ; CHECK-NEXT: vxor.vv v8, v16, v24, v0.t ; CHECK-NEXT: jalr zero, 0(ra) diff --git a/llvm/test/CodeGen/RISCV/scalable-vector-struct.ll b/llvm/test/CodeGen/RISCV/scalable-vector-struct.ll --- a/llvm/test/CodeGen/RISCV/scalable-vector-struct.ll +++ b/llvm/test/CodeGen/RISCV/scalable-vector-struct.ll @@ -7,9 +7,8 @@ define i32 @foo({ {, }, i32 } %x, * %y, * %z) { ; CHECK-LABEL: foo: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a3, zero, e32,m1,ta,mu -; CHECK-NEXT: vse32.v v8, (a1) -; CHECK-NEXT: vse32.v v9, (a2) +; CHECK-NEXT: vs1r.v v8, (a1) +; CHECK-NEXT: vs1r.v v9, (a2) ; CHECK-NEXT: ret entry: br label %return