diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2210,6 +2210,18 @@
     // node in order to try and match RVV vector/scalar instructions.
     if ((LoC >> 31) == HiC)
       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
+
+    // If vl is equal to VLMAX and the Hi constant is equal to Lo, we can use
+    // a vmv.v.x with EEW=32 to lower it.
+    auto *Const = dyn_cast<ConstantSDNode>(VL);
+    if (LoC == HiC && Const && Const->getSExtValue() == RISCV::VLMaxSentinel) {
+      auto InterVT =
+          MVT::getScalableVectorVT(MVT::i32, 2 * VT.getVectorNumElements());
+      // TODO: We could also do this if vl <= min(VLMAX), but we cannot
+      // access the subtarget here for now.
+      auto InterVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterVT, Lo, VL);
+      return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
+    }
   }
 
   // Fall back to a stack store and stride x0 vector load.
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
@@ -728,14 +728,8 @@
 define <vscale x 1 x i64> @intrinsic_vmv.v.x_i_nxv1i64_vlmax() nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i64_vlmax:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    li a0, 3
-; CHECK-NEXT:    sw a0, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v8, (a0), zero
-; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 3
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
@@ -748,14 +742,8 @@
 define <vscale x 2 x i64> @intrinsic_vmv.v.x_i_nxv2i64_vlmax() nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i64_vlmax:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    li a0, 3
-; CHECK-NEXT:    sw a0, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v8, (a0), zero
-; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 3
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
@@ -768,14 +756,8 @@
 define <vscale x 4 x i64> @intrinsic_vmv.v.x_i_nxv4i64_vlmax() nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i64_vlmax:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    li a0, 3
-; CHECK-NEXT:    sw a0, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v8, (a0), zero
-; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 3
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
@@ -788,14 +770,8 @@
 define <vscale x 8 x i64> @intrinsic_vmv.v.x_i_nxv8i64_vlmax() nounwind {
 ; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i64_vlmax:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    li a0, 3
-; CHECK-NEXT:    sw a0, 12(sp)
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v8, (a0), zero
-; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
+; CHECK-NEXT:    vmv.v.i v8, 3
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
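
A note on why the bitcast in this patch is sound: when the two 32-bit halves of the splatted i64 constant are equal, splatting the 32-bit half across twice as many i32 elements produces the same register image as the i64 splat, so reinterpreting the result costs nothing. The VLMAX check matters because the VL operand counts elements: with the VLMax sentinel the whole register group is written under either element width, whereas an arbitrary vl of N i64 elements would correspond to 2*N i32 elements. Below is a minimal sketch of the bit-pattern equivalence (plain standalone C++, not LLVM code; the array sizes and the name `Half` are illustrative only):

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Illustration: with Lo == Hi, an i64 splat and an i32 splat over twice as
// many elements have identical byte patterns, so the i64 vector can be
// produced by a 32-bit splat plus a bitcast.
int main() {
  const uint32_t Half = 3;                             // Lo == Hi == 3
  const uint64_t Splat64 = ((uint64_t)Half << 32) | Half;

  uint64_t V64[4];                                     // stand-in for an i64 vector
  uint32_t V32[8];                                     // same bytes viewed as i32
  for (uint64_t &E : V64) E = Splat64;                 // EEW=64 splat
  for (uint32_t &E : V32) E = Half;                    // EEW=32 splat

  // The two register images are bit-for-bit identical.
  assert(std::memcmp(V64, V32, sizeof(V64)) == 0);
  return 0;
}
```

This also shows why the tests above improve: the i64 value 0x0000000300000003 needs a stack store plus a strided load on RV32, but its halves are equal and fit in simm5, so a single `vmv.v.i` at e32 materializes the same bytes.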