diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -404,6 +404,12 @@
                     [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
                      llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic;
+  // Output: (mask type output)
+  // Input: (vl)
+  class RISCVNullaryIntrinsic
+        : Intrinsic<[llvm_anyvector_ty],
+                    [llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic;
 
   multiclass RISCVUSLoad {
     def "int_riscv_" # NAME : RISCVUSLoad;
@@ -701,6 +707,8 @@
   def int_riscv_vmnor: RISCVBinaryAAANoMask;
   def int_riscv_vmornot: RISCVBinaryAAANoMask;
   def int_riscv_vmxnor: RISCVBinaryAAANoMask;
+  def int_riscv_vmclr : RISCVNullaryIntrinsic;
+  def int_riscv_vmset : RISCVNullaryIntrinsic;
 
   defm vpopc : RISCVMaskUnarySOut;
   defm vfirst : RISCVMaskUnarySOut;
diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
--- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp
@@ -60,6 +60,9 @@
                               MachineBasicBlock::iterator MBBI,
                               MachineBasicBlock::iterator &NextMBBI);
   bool expandVSetVL(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
+  bool expandVMSET_VMCLR(MachineBasicBlock &MBB,
+                         MachineBasicBlock::iterator MBBI,
+                         bool IsVMCLR = false);
 };
 
 char RISCVExpandPseudo::ID = 0;
@@ -102,6 +105,22 @@
     return expandLoadTLSGDAddress(MBB, MBBI, NextMBBI);
   case RISCV::PseudoVSETVLI:
     return expandVSetVL(MBB, MBBI);
+  case RISCV::PseudoVMCLR_M_B1:
+  case RISCV::PseudoVMCLR_M_B2:
+  case RISCV::PseudoVMCLR_M_B4:
+  case RISCV::PseudoVMCLR_M_B8:
+  case RISCV::PseudoVMCLR_M_B16:
+  case RISCV::PseudoVMCLR_M_B32:
+  case RISCV::PseudoVMCLR_M_B64:
+    return expandVMSET_VMCLR(MBB, MBBI, /*IsVMCLR=*/true);
+  case RISCV::PseudoVMSET_M_B1:
+  case RISCV::PseudoVMSET_M_B2:
+  case RISCV::PseudoVMSET_M_B4:
+  case RISCV::PseudoVMSET_M_B8:
+  case RISCV::PseudoVMSET_M_B16:
+  case RISCV::PseudoVMSET_M_B32:
+  case RISCV::PseudoVMSET_M_B64:
+    return expandVMSET_VMCLR(MBB, MBBI);
   }
 
   return false;
@@ -213,6 +232,22 @@
   return true;
 }
 
+bool RISCVExpandPseudo::expandVMSET_VMCLR(MachineBasicBlock &MBB,
+                                          MachineBasicBlock::iterator MBBI,
+                                          bool IsVMCLR) {
+  MachineInstr &MI = *MBBI;
+  DebugLoc DL = MI.getDebugLoc();
+  Register DstReg = MBBI->getOperand(0).getReg();
+  const MCInstrDesc &Desc =
+      IsVMCLR ? TII->get(RISCV::VMXOR_MM) : TII->get(RISCV::VMXNOR_MM);
+  BuildMI(MBB, MBBI, DL, Desc)
+      .addReg(DstReg, RegState::Define)
+      .addReg(DstReg, RegState::Undef)
+      .addReg(DstReg, RegState::Undef);
+  MBBI->eraseFromParent(); // The pseudo instruction is gone now.
+  return true;
+}
+
 } // end of anonymous namespace
 
 INITIALIZE_PASS(RISCVExpandPseudo, "riscv-expand-pseudo",
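The expansion above relies on the canonical pseudoinstruction aliases from the V specification: vmclr.m and vmset.m have no dedicated encodings and are defined in terms of the mask-logical instructions. A minimal sketch of the rewrite (v0 is chosen arbitrarily for illustration):

    # vmclr.m v0   ->   vmxor.mm  v0, v0, v0    # x ^ x == 0 clears every mask bit
    # vmset.m v0   ->   vmxnor.mm v0, v0, v0    # ~(x ^ x) == 1 sets every mask bit

Because both source operands are the destination itself, the result does not depend on the previous contents of the register, which is why the source operands can carry RegState::Undef.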
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -538,6 +538,29 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoNullaryM :
+      Pseudo<(outs VR:$rd), (ins GPR:$vl, ixlenimm:$sew), []> {
+  let hasSideEffects = 0;
+  let mayLoad = 0;
+  let mayStore = 0;
+  let Defs = [VL, VTYPE];
+}
+
+// Nullary pseudo for VMCLR and VMSET.
+class VPseudoNullaryPseudoM
+    : Pseudo<(outs VR:$rd), (ins GPR:$vl, ixlenimm:$sew), []>,
+      RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 1;
+  let SEWIndex = 2;
+  // Set BaseInstr to the pseudo instruction, which is expanded later.
+  let BaseInstr = !cast<Instruction>("Pseudo" # PseudoToVInst<NAME>.VInst);
+}
+
 class VPseudoUnaryNoMask<DAGOperand RetClass, VReg OpClass> :
         Pseudo<(outs RetClass:$rd),
                (ins OpClass:$rs2, GPR:$vl, ixlenimm:$sew), []>,
@@ -821,6 +844,16 @@
   }
 }
 
+multiclass VPseudoNullaryM {
+  // Pseudo instructions that expand into the corresponding v-inst.
+  def "_M" : VPseudoNullaryM;
+  foreach mti = AllMasks in {
+    let VLMul = mti.LMul.value in {
+      def "_M_" # mti.BX : VPseudoNullaryPseudoM;
+    }
+  }
+}
+
 multiclass VPseudoUnaryV_M {
   defvar constraint = "@earlyclobber $rd";
   foreach m = MxList.m in {
@@ -1464,6 +1497,15 @@
   }
 }
 
+multiclass VPatNullaryM<string intrinsic, string inst> {
+  foreach mti = AllMasks in
+    def : Pat<(mti.Mask (!cast<Intrinsic>(intrinsic)
+                         (XLenVT GPR:$vl))),
+              (!cast<Instruction>(inst # "_M_" # mti.BX)
+                  (NoX0 GPR:$vl), mti.SEW)>;
+}
+
+
 multiclass VPatBinary<string intrinsic,
 
 defm "" : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">;
 
+// pseudo instructions
+defm "" : VPatNullaryM<"int_riscv_vmclr", "PseudoVMCLR">;
+defm "" : VPatNullaryM<"int_riscv_vmset", "PseudoVMSET">;
+
 //===----------------------------------------------------------------------===//
 // 16.2. Vector mask population count vpopc
 //===----------------------------------------------------------------------===//
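Each pseudo carries its VL and SEW as explicit operands (VLIndex = 1, SEWIndex = 2), so instruction selection can feed the intrinsic's vl argument straight through, and a vsetvli is inserted before the expanded mask-logical instruction. For example, for @llvm.riscv.vmclr.nxv8i1 (an LMUL=1 mask) with the AVL in a0, the emitted sequence looks roughly like this (register choices are illustrative):

    vsetvli a1, a0, e8,m1        # configure VL/VTYPE for the mask operation
    vmxor.mm v25, v25, v25       # vmclr.m: every mask bit becomes 0

The tests below check exactly this vsetvli + vmxor.mm/vmxnor.mm shape for each mask type.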
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmclr-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmclr-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmclr-rv32.ll
@@ -0,0 +1,99 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1(
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmclr_m_pseudo_nxv1i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv1i1
+; CHECK: vsetvli {{.*}}, a0, e8,mf8
+; CHECK: vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1(
+    i32 %0)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1(
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmclr_m_pseudo_nxv2i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv2i1
+; CHECK: vsetvli {{.*}}, a0, e8,mf4
+; CHECK: vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1(
+    i32 %0)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1(
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmclr_m_pseudo_nxv4i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv4i1
+; CHECK: vsetvli {{.*}}, a0, e8,mf2
+; CHECK: vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1(
+    i32 %0)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1(
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmclr_m_pseudo_nxv8i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv8i1
+; CHECK: vsetvli {{.*}}, a0, e8,m1
+; CHECK: vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1(
+    i32 %0)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1(
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmclr_m_pseudo_nxv16i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv16i1
+; CHECK: vsetvli {{.*}}, a0, e8,m2
+; CHECK: vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1(
+    i32 %0)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1(
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmclr_m_pseudo_nxv32i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv32i1
+; CHECK: vsetvli {{.*}}, a0, e8,m4
+; CHECK: vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1(
+    i32 %0)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1(
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmclr_m_pseudo_nxv64i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv64i1
+; CHECK: vsetvli {{.*}}, a0, e8,m8
+; CHECK: vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1(
+    i32 %0)
+
+  ret <vscale x 64 x i1> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmclr-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmclr-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmclr-rv64.ll
@@ -0,0 +1,99 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1(
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmclr_m_pseudo_nxv1i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv1i1
+; CHECK: vsetvli {{.*}}, a0, e8,mf8
+; CHECK: vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1(
+    i64 %0)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1(
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmclr_m_pseudo_nxv2i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv2i1
+; CHECK: vsetvli {{.*}}, a0, e8,mf4
+; CHECK: vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1(
+    i64 %0)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1(
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmclr_m_pseudo_nxv4i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv4i1
+; CHECK: vsetvli {{.*}}, a0, e8,mf2
+; CHECK: vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1(
+    i64 %0)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1(
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmclr_m_pseudo_nxv8i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv8i1
+; CHECK: vsetvli {{.*}}, a0, e8,m1
+; CHECK: vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1(
+    i64 %0)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1(
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmclr_m_pseudo_nxv16i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv16i1
+; CHECK: vsetvli {{.*}}, a0, e8,m2
+; CHECK: vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1(
+    i64 %0)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1(
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmclr_m_pseudo_nxv32i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv32i1
+; CHECK: vsetvli {{.*}}, a0, e8,m4
+; CHECK: vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1(
+    i64 %0)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1(
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmclr_m_pseudo_nxv64i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv64i1
+; CHECK: vsetvli {{.*}}, a0, e8,m8
+; CHECK: vmxor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1(
+    i64 %0)
+
+  ret <vscale x 64 x i1> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmset-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmset-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmset-rv32.ll
@@ -0,0 +1,99 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmset_m_pseudo_nxv1i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv1i1
+; CHECK: vsetvli {{.*}}, a0, e8,mf8
+; CHECK: vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
+    i32 %0)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1(
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmset_m_pseudo_nxv2i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv2i1
+; CHECK: vsetvli {{.*}}, a0, e8,mf4
+; CHECK: vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1(
+    i32 %0)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1(
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmset_m_pseudo_nxv4i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv4i1
+; CHECK: vsetvli {{.*}}, a0, e8,mf2
+; CHECK: vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1(
+    i32 %0)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmset_m_pseudo_nxv8i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv8i1
+; CHECK: vsetvli {{.*}}, a0, e8,m1
+; CHECK: vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(
+    i32 %0)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1(
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmset_m_pseudo_nxv16i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv16i1
+; CHECK: vsetvli {{.*}}, a0, e8,m2
+; CHECK: vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1(
+    i32 %0)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1(
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmset_m_pseudo_nxv32i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv32i1
+; CHECK: vsetvli {{.*}}, a0, e8,m4
+; CHECK: vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1(
+    i32 %0)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1(
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmset_m_pseudo_nxv64i1(i32 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv64i1
+; CHECK: vsetvli {{.*}}, a0, e8,m8
+; CHECK: vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1(
+    i32 %0)
+
+  ret <vscale x 64 x i1> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmset-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmset-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmset-rv64.ll
@@ -0,0 +1,99 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmset_m_pseudo_nxv1i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv1i1
+; CHECK: vsetvli {{.*}}, a0, e8,mf8
+; CHECK: vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
+    i64 %0)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1(
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmset_m_pseudo_nxv2i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv2i1
+; CHECK: vsetvli {{.*}}, a0, e8,mf4
+; CHECK: vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1(
+    i64 %0)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1(
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmset_m_pseudo_nxv4i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv4i1
+; CHECK: vsetvli {{.*}}, a0, e8,mf2
+; CHECK: vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1(
+    i64 %0)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmset_m_pseudo_nxv8i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv8i1
+; CHECK: vsetvli {{.*}}, a0, e8,m1
+; CHECK: vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(
+    i64 %0)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1(
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmset_m_pseudo_nxv16i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv16i1
+; CHECK: vsetvli {{.*}}, a0, e8,m2
+; CHECK: vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1(
+    i64 %0)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1(
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmset_m_pseudo_nxv32i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv32i1
+; CHECK: vsetvli {{.*}}, a0, e8,m4
+; CHECK: vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1(
+    i64 %0)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1(
+  i64);
+
+define <vscale x 64 x i1> @intrinsic_vmset_m_pseudo_nxv64i1(i64 %0) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv64i1
+; CHECK: vsetvli {{.*}}, a0, e8,m8
+; CHECK: vmxnor.mm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1(
+    i64 %0)
+
+  ret <vscale x 64 x i1> %a
+}
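As a usage sketch (not part of the patch), the intrinsics return ordinary scalable i1 mask vectors, so their results compose with generic IR. A hypothetical function that builds an all-true mask with vmset and feeds it to a select:

    ; Illustrative only; keep_lhs is a made-up name.
    declare <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(i64)

    define <vscale x 8 x i8> @keep_lhs(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, i64 %vl) {
      %m = call <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(i64 %vl)
      %r = select <vscale x 8 x i1> %m, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b
      ret <vscale x 8 x i8> %r
    }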