diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1139,4 +1139,11 @@
     defm vsuxseg # nf : RISCVISegStore;
   }
 
+  // Intrinsics for converting between different LMULs.
+  def int_riscv_vlmul_trunc : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty],
+                                        [IntrNoMem]>;
+  def int_riscv_vlmul_ext : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty],
+                                      [IntrNoMem]>;
+
+
 } // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2109,6 +2109,16 @@
   case Intrinsic::riscv_vfmv_v_f:
     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
                        Op.getOperand(1), Op.getOperand(2));
+  case Intrinsic::riscv_vlmul_ext: {
+    MVT VT = Op.getSimpleValueType();
+    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT),
+                       Op.getOperand(1), DAG.getIntPtrConstant(0, DL));
+  }
+  case Intrinsic::riscv_vlmul_trunc: {
+    MVT VT = Op.getSimpleValueType();
+    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Op.getOperand(1),
+                       DAG.getIntPtrConstant(0, DL));
+  }
   }
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlmul.ll b/llvm/test/CodeGen/RISCV/rvv/vlmul.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlmul.ll
@@ -0,0 +1,92 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vlmul.trunc.nxv1i8.nxv2i8(<vscale x 2 x i8>)
+declare <vscale x 1 x i8> @llvm.riscv.vlmul.trunc.nxv1i8.nxv16i8(<vscale x 16 x i8>)
+
+declare <vscale x 2 x i8> @llvm.riscv.vlmul.ext.nxv2i8.nxv1i8(<vscale x 1 x i8>)
+declare <vscale x 16 x i8> @llvm.riscv.vlmul.ext.nxv16i8.nxv1i8(<vscale x 1 x i8>)
+
+declare <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, i64)
+declare <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, i64)
+declare <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, i64)
+
+declare <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(<vscale x 16 x i8>*, i64)
+declare <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(<vscale x 2 x i8>*, i64)
+declare <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(<vscale x 1 x i8>*, i64)
+
+define <vscale x 1 x i8> @test_vlmul_trunc_i8mf4_i8mf8_vl(i64 %vl, <vscale x 2 x i8>* %lhs, <vscale x 2 x i8>* %rhs) nounwind {
+; CHECK-LABEL: test_vlmul_trunc_i8mf4_i8mf8_vl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a1)
+; CHECK-NEXT:    vle8.v v26, (a2)
+; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vadd.vv v8, v25, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(<vscale x 2 x i8>* %lhs, i64 %vl)
+  %b = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(<vscale x 2 x i8>* %rhs, i64 %vl)
+  %add = call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i64 %vl)
+  %0 = tail call <vscale x 1 x i8> @llvm.riscv.vlmul.trunc.nxv1i8.nxv2i8(<vscale x 2 x i8> %add)
+  %result = tail call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %0, i64 %vl)
+  ret <vscale x 1 x i8> %result
+}
+
+define <vscale x 1 x i8> @test_vlmul_trunc_i8m2_i8mf8_vl(i64 %vl, <vscale x 16 x i8>* %lhs, <vscale x 16 x i8>* %rhs) nounwind {
+; CHECK-LABEL: test_vlmul_trunc_i8m2_i8mf8_vl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vle8.v v26, (a1)
+; CHECK-NEXT:    vle8.v v28, (a2)
+; CHECK-NEXT:    vadd.vv v26, v26, v28
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vadd.vv v8, v26, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(<vscale x 16 x i8>* %lhs, i64 %vl)
+  %b = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(<vscale x 16 x i8>* %rhs, i64 %vl)
+  %add = call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i64 %vl)
+  %0 = tail call <vscale x 1 x i8> @llvm.riscv.vlmul.trunc.nxv1i8.nxv16i8(<vscale x 16 x i8> %add)
+  %result = tail call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %0, i64 %vl)
+  ret <vscale x 1 x i8> %result
+}
+
+define <vscale x 2 x i8> @test_vlmul_ext_i8mf4_i8mf8_vl(i64 %vl, <vscale x 1 x i8>* %lhs, <vscale x 1 x i8>* %rhs) nounwind {
+; CHECK-LABEL: test_vlmul_ext_i8mf4_i8mf8_vl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a1)
+; CHECK-NEXT:    vle8.v v26, (a2)
+; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vadd.vv v8, v25, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(<vscale x 1 x i8>* %lhs, i64 %vl)
+  %b = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(<vscale x 1 x i8>* %rhs, i64 %vl)
+  %add = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i64 %vl)
+  %0 = tail call <vscale x 2 x i8> @llvm.riscv.vlmul.ext.nxv2i8.nxv1i8(<vscale x 1 x i8> %add)
+  %result = tail call <vscale x 2 x i8> @llvm.riscv.vadd.nxv2i8.nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %0, i64 %vl)
+  ret <vscale x 2 x i8> %result
+}
+
+define <vscale x 16 x i8> @test_vlmul_ext_i8m2_i8mf8_vl(i64 %vl, <vscale x 1 x i8>* %lhs, <vscale x 1 x i8>* %rhs) nounwind {
+; CHECK-LABEL: test_vlmul_ext_i8m2_i8mf8_vl:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vle8.v v25, (a1)
+; CHECK-NEXT:    vle8.v v26, (a2)
+; CHECK-NEXT:    vadd.vv v26, v25, v26
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vadd.vv v8, v26, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(<vscale x 1 x i8>* %lhs, i64 %vl)
+  %b = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(<vscale x 1 x i8>* %rhs, i64 %vl)
+  %add = call <vscale x 1 x i8> @llvm.riscv.vadd.nxv1i8.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i64 %vl)
+  %0 = tail call <vscale x 16 x i8> @llvm.riscv.vlmul.ext.nxv16i8.nxv1i8(<vscale x 1 x i8> %add)
+  %result = tail call <vscale x 16 x i8> @llvm.riscv.vadd.nxv16i8.nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %0, i64 %vl)
+  ret <vscale x 16 x i8> %result
+}
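
Reviewer note (not part of the patch): since both the result and the source vector types of int_riscv_vlmul_ext/int_riscv_vlmul_trunc are overloaded, a frontend that emits these intrinsics has to supply both types when requesting the declaration, which is what produces the double mangling suffix seen in the test file (e.g. @llvm.riscv.vlmul.ext.nxv2i8.nxv1i8). The sketch below is illustrative only; the helper name emitVLMulExt and the mf8 -> mf4 type pair are assumptions, not code taken from this change.

// Sketch: emit a call to the overloaded llvm.riscv.vlmul.ext intrinsic
// introduced by this patch using IRBuilder. Widens a <vscale x 1 x i8>
// (LMUL=1/8) value into <vscale x 2 x i8> (LMUL=1/4); the extra elements are
// undefined, matching the INSERT_SUBVECTOR-into-UNDEF lowering above.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static Value *emitVLMulExt(IRBuilder<> &Builder, Module &M, Value *Src) {
  // Destination register-group type: <vscale x 2 x i8>.
  auto *DstTy = ScalableVectorType::get(Builder.getInt8Ty(), 2);
  // Overload on {result type, source type}; with a <vscale x 1 x i8> source
  // this yields the mangled name @llvm.riscv.vlmul.ext.nxv2i8.nxv1i8.
  Function *Ext = Intrinsic::getDeclaration(&M, Intrinsic::riscv_vlmul_ext,
                                            {DstTy, Src->getType()});
  return Builder.CreateCall(Ext, {Src});
}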