diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -404,6 +404,12 @@ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic; + // Output: (vector) + // Input: (vl) + class RISCVNullaryIntrinsic + : Intrinsic<[llvm_anyvector_ty], + [llvm_anyint_ty], + [IntrNoMem]>, RISCVVIntrinsic; multiclass RISCVUSLoad { def "int_riscv_" # NAME : RISCVUSLoad; @@ -701,6 +707,8 @@ def int_riscv_vmnor: RISCVBinaryAAANoMask; def int_riscv_vmornot: RISCVBinaryAAANoMask; def int_riscv_vmxnor: RISCVBinaryAAANoMask; + def int_riscv_vmclr : RISCVNullaryIntrinsic; + def int_riscv_vmset : RISCVNullaryIntrinsic; defm vpopc : RISCVMaskUnarySOut; defm vfirst : RISCVMaskUnarySOut; @@ -724,9 +732,8 @@ [IntrNoMem]>, RISCVVIntrinsic; // Output: (vector) // Input: (vl) - def int_riscv_vid : Intrinsic<[llvm_anyvector_ty], - [llvm_anyint_ty], - [IntrNoMem]>, RISCVVIntrinsic; + def int_riscv_vid : RISCVNullaryIntrinsic; + // Output: (vector) // Input: (maskedoff, mask, vl) def int_riscv_vid_mask : Intrinsic<[llvm_anyvector_ty], diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp --- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp +++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp @@ -60,6 +60,8 @@ MachineBasicBlock::iterator MBBI, MachineBasicBlock::iterator &NextMBBI); bool expandVSetVL(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI); + bool expandVMSET_VMCLR(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, unsigned Opcode); }; char RISCVExpandPseudo::ID = 0; @@ -102,6 +104,24 @@ return expandLoadTLSGDAddress(MBB, MBBI, NextMBBI); case RISCV::PseudoVSETVLI: return expandVSetVL(MBB, MBBI); + case RISCV::PseudoVMCLR_M_B1: + case RISCV::PseudoVMCLR_M_B2: + case RISCV::PseudoVMCLR_M_B4: + case RISCV::PseudoVMCLR_M_B8: + case 
RISCV::PseudoVMCLR_M_B16: + case RISCV::PseudoVMCLR_M_B32: + case RISCV::PseudoVMCLR_M_B64: + // vmclr.m vd => vmxor.mm vd, vd, vd + return expandVMSET_VMCLR(MBB, MBBI, RISCV::VMXOR_MM); + case RISCV::PseudoVMSET_M_B1: + case RISCV::PseudoVMSET_M_B2: + case RISCV::PseudoVMSET_M_B4: + case RISCV::PseudoVMSET_M_B8: + case RISCV::PseudoVMSET_M_B16: + case RISCV::PseudoVMSET_M_B32: + case RISCV::PseudoVMSET_M_B64: + // vmset.m vd => vmxnor.mm vd, vd, vd + return expandVMSET_VMCLR(MBB, MBBI, RISCV::VMXNOR_MM); } return false; @@ -213,6 +233,19 @@ return true; } +bool RISCVExpandPseudo::expandVMSET_VMCLR(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MBBI, + unsigned Opcode) { + DebugLoc DL = MBBI->getDebugLoc(); + Register DstReg = MBBI->getOperand(0).getReg(); + const MCInstrDesc &Desc = TII->get(Opcode); + BuildMI(MBB, MBBI, DL, Desc, DstReg) + .addReg(DstReg, RegState::Undef) + .addReg(DstReg, RegState::Undef); + MBBI->eraseFromParent(); // The pseudo instruction is gone now. + return true; +} + } // end of anonymous namespace INITIALIZE_PASS(RISCVExpandPseudo, "riscv-expand-pseudo", diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -538,6 +538,23 @@ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst); } +// Nullary for pseudo instructions. They are expanded in +// RISCVExpandPseudoInsts pass. +class VPseudoNullaryPseudoM<string BaseInst> + : Pseudo<(outs VR:$rd), (ins GPR:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let usesCustomInserter = 1; + let Uses = [VL, VTYPE]; + let VLIndex = 1; + let SEWIndex = 2; + // BaseInstr is not used in RISCVExpandPseudoInsts pass. + // Just fill a corresponding real v-inst to pass tablegen check. + let BaseInstr = !cast<Instruction>(BaseInst); +} + // RetClass could be GPR or VReg. 
class VPseudoUnaryNoMask<DAGOperand RetClass, VReg OpClass, string Constraint = ""> : Pseudo<(outs RetClass:$rd), @@ -821,6 +838,14 @@ } } +multiclass VPseudoNullaryPseudoM <string BaseInst> { + foreach mti = AllMasks in { + let VLMul = mti.LMul.value in { + def "_M_" # mti.BX : VPseudoNullaryPseudoM<BaseInst # "_MM">; + } + } +} + multiclass VPseudoUnaryV_M { defvar constraint = "@earlyclobber $rd"; foreach m = MxList.m in { @@ -1464,6 +1489,15 @@ } } +multiclass VPatNullaryM<string intrinsic, string inst> { + foreach mti = AllMasks in + def : Pat<(mti.Mask (!cast<Intrinsic>(intrinsic) + (XLenVT GPR:$vl))), + (!cast<Instruction>(inst#"_M_"#mti.BX) + (NoX0 GPR:$vl), mti.SEW)>; +} + + multiclass VPatBinary<string intrinsic, @@ -2204,6 +2238,10 @@ defm PseudoVMXNOR: VPseudoBinaryM_MM; + +// Pseudo instructions +defm PseudoVMCLR : VPseudoNullaryPseudoM<"VMXOR">; +defm PseudoVMSET : VPseudoNullaryPseudoM<"VMXNOR">; + //===----------------------------------------------------------------------===// // 16.2. Vector mask population count vpopc //===----------------------------------------------------------------------===// @@ -2913,6 +2951,10 @@ defm "" : VPatBinaryM_MM<"int_riscv_vmornot", "PseudoVMORNOT">; defm "" : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">; + +// pseudo instructions +defm "" : VPatNullaryM<"int_riscv_vmclr", "PseudoVMCLR">; +defm "" : VPatNullaryM<"int_riscv_vmset", "PseudoVMSET">; + //===----------------------------------------------------------------------===// // 16.2. 
Vector mask population count vpopc //===----------------------------------------------------------------------===// diff --git a/llvm/test/CodeGen/RISCV/rvv/vmclr-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmclr-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmclr-rv32.ll @@ -0,0 +1,99 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zfh -verify-machineinstrs \ +; RUN: < %s | FileCheck %s +declare <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1( + i32); + +define <vscale x 1 x i1> @intrinsic_vmclr_m_pseudo_nxv1i1(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv1i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf8 +; CHECK: vmclr.m {{v[0-9]+}} + %a = call <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1( + i32 %0) + + ret <vscale x 1 x i1> %a +} + +declare <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1( + i32); + +define <vscale x 2 x i1> @intrinsic_vmclr_m_pseudo_nxv2i1(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv2i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf4 +; CHECK: vmclr.m {{v[0-9]+}} + %a = call <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1( + i32 %0) + + ret <vscale x 2 x i1> %a +} + +declare <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1( + i32); + +define <vscale x 4 x i1> @intrinsic_vmclr_m_pseudo_nxv4i1(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv4i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf2 +; CHECK: vmclr.m {{v[0-9]+}} + %a = call <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1( + i32 %0) + + ret <vscale x 4 x i1> %a +} + +declare <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1( + i32); + +define <vscale x 8 x i1> @intrinsic_vmclr_m_pseudo_nxv8i1(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv8i1 +; CHECK: vsetvli {{.*}}, a0, e8,m1 +; CHECK: vmclr.m {{v[0-9]+}} + %a = call <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1( + i32 %0) + + ret <vscale x 8 x i1> %a +} + +declare <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1( + i32); + +define <vscale x 16 x i1> @intrinsic_vmclr_m_pseudo_nxv16i1(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv16i1 +; CHECK: vsetvli {{.*}}, a0, e8,m2 +; CHECK: vmclr.m {{v[0-9]+}} + %a = call <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1( + i32 %0) + + ret <vscale x 16 x i1> %a +} + +declare <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1( + i32); + +define <vscale x 32 x i1> @intrinsic_vmclr_m_pseudo_nxv32i1(i32 %0) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv32i1 +; CHECK: vsetvli {{.*}}, a0, e8,m4 +; CHECK: vmclr.m {{v[0-9]+}} + %a = call <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1( + i32 %0) + + ret <vscale x 32 x i1> %a +} + +declare <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1( + i32); + +define <vscale x 64 x i1> @intrinsic_vmclr_m_pseudo_nxv64i1(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv64i1 +; CHECK: vsetvli {{.*}}, a0, e8,m8 +; CHECK: vmclr.m {{v[0-9]+}} + %a = call <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1( + i32 %0) + + ret <vscale x 64 x i1> %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmclr-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmclr-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmclr-rv64.ll @@ -0,0 +1,99 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zfh -verify-machineinstrs \ +; RUN: < %s | FileCheck %s +declare <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1( + i64); + +define <vscale x 1 x i1> @intrinsic_vmclr_m_pseudo_nxv1i1(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv1i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf8 +; CHECK: vmclr.m {{v[0-9]+}} + %a = call <vscale x 1 x i1> @llvm.riscv.vmclr.nxv1i1( + i64 %0) + + ret <vscale x 1 x i1> %a +} + +declare <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1( + i64); + +define <vscale x 2 x i1> @intrinsic_vmclr_m_pseudo_nxv2i1(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv2i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf4 +; CHECK: vmclr.m {{v[0-9]+}} + %a = call <vscale x 2 x i1> @llvm.riscv.vmclr.nxv2i1( + i64 %0) + + ret <vscale x 2 x i1> %a +} + +declare <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1( + i64); + +define <vscale x 4 x i1> @intrinsic_vmclr_m_pseudo_nxv4i1(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv4i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf2 +; CHECK: vmclr.m {{v[0-9]+}} + %a = call <vscale x 4 x i1> @llvm.riscv.vmclr.nxv4i1( + i64 %0) + + ret <vscale x 4 x i1> %a +} + +declare <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1( + i64); + +define <vscale x 8 x i1> @intrinsic_vmclr_m_pseudo_nxv8i1(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv8i1 +; CHECK: vsetvli {{.*}}, a0, e8,m1 +; CHECK: vmclr.m {{v[0-9]+}} + %a = call <vscale x 8 x i1> @llvm.riscv.vmclr.nxv8i1( + i64 %0) + + ret <vscale x 8 x i1> %a +} + +declare 
<vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1( + i64); + +define <vscale x 16 x i1> @intrinsic_vmclr_m_pseudo_nxv16i1(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv16i1 +; CHECK: vsetvli {{.*}}, a0, e8,m2 +; CHECK: vmclr.m {{v[0-9]+}} + %a = call <vscale x 16 x i1> @llvm.riscv.vmclr.nxv16i1( + i64 %0) + + ret <vscale x 16 x i1> %a +} + +declare <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1( + i64); + +define <vscale x 32 x i1> @intrinsic_vmclr_m_pseudo_nxv32i1(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv32i1 +; CHECK: vsetvli {{.*}}, a0, e8,m4 +; CHECK: vmclr.m {{v[0-9]+}} + %a = call <vscale x 32 x i1> @llvm.riscv.vmclr.nxv32i1( + i64 %0) + + ret <vscale x 32 x i1> %a +} + +declare <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1( + i64); + +define <vscale x 64 x i1> @intrinsic_vmclr_m_pseudo_nxv64i1(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv64i1 +; CHECK: vsetvli {{.*}}, a0, e8,m8 +; CHECK: vmclr.m {{v[0-9]+}} + %a = call <vscale x 64 x i1> @llvm.riscv.vmclr.nxv64i1( + i64 %0) + + ret <vscale x 64 x i1> %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmset-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmset-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmset-rv32.ll @@ -0,0 +1,99 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zfh -verify-machineinstrs \ +; RUN: < %s | FileCheck %s +declare <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1( + i32); + +define <vscale x 1 x i1> @intrinsic_vmset_m_pseudo_nxv1i1(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv1i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf8 +; CHECK: vmset.m {{v[0-9]+}} + %a = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1( + i32 %0) + + ret <vscale x 1 x i1> %a +} + +declare <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1( + i32); + +define <vscale x 2 x i1> @intrinsic_vmset_m_pseudo_nxv2i1(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv2i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf4 +; CHECK: vmset.m {{v[0-9]+}} + %a = call <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1( + i32 %0) + + ret <vscale x 2 x i1> %a +} + +declare <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1( + i32); + +define <vscale x 4 x i1> @intrinsic_vmset_m_pseudo_nxv4i1(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv4i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf2 +; CHECK: vmset.m 
{{v[0-9]+}} + %a = call <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1( + i32 %0) + + ret <vscale x 4 x i1> %a +} + +declare <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1( + i32); + +define <vscale x 8 x i1> @intrinsic_vmset_m_pseudo_nxv8i1(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv8i1 +; CHECK: vsetvli {{.*}}, a0, e8,m1 +; CHECK: vmset.m {{v[0-9]+}} + %a = call <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1( + i32 %0) + + ret <vscale x 8 x i1> %a +} + +declare <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1( + i32); + +define <vscale x 16 x i1> @intrinsic_vmset_m_pseudo_nxv16i1(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv16i1 +; CHECK: vsetvli {{.*}}, a0, e8,m2 +; CHECK: vmset.m {{v[0-9]+}} + %a = call <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1( + i32 %0) + + ret <vscale x 16 x i1> %a +} + +declare <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1( + i32); + +define <vscale x 32 x i1> @intrinsic_vmset_m_pseudo_nxv32i1(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv32i1 +; CHECK: vsetvli {{.*}}, a0, e8,m4 +; CHECK: vmset.m {{v[0-9]+}} + %a = call <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1( + i32 %0) + + ret <vscale x 32 x i1> %a +} + +declare <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1( + i32); + +define <vscale x 64 x i1> @intrinsic_vmset_m_pseudo_nxv64i1(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv64i1 +; CHECK: vsetvli {{.*}}, a0, e8,m8 +; CHECK: vmset.m {{v[0-9]+}} + %a = call <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1( + i32 %0) + + ret <vscale x 64 x i1> %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmset-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmset-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmset-rv64.ll @@ -0,0 +1,99 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zfh -verify-machineinstrs \ +; RUN: < %s | FileCheck %s +declare <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1( + i64); + +define <vscale x 1 x i1> @intrinsic_vmset_m_pseudo_nxv1i1(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv1i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf8 +; CHECK: vmset.m {{v[0-9]+}} + %a = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1( + i64 %0) + + ret <vscale x 1 x i1> %a +} + +declare <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1( + i64); + +define <vscale x 2 x i1> @intrinsic_vmset_m_pseudo_nxv2i1(i64 %0) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmset_m_pseudo_nxv2i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf4 +; CHECK: vmset.m {{v[0-9]+}} + %a = call <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1( + i64 %0) + + ret <vscale x 2 x i1> %a +} + +declare <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1( + i64); + +define <vscale x 4 x i1> @intrinsic_vmset_m_pseudo_nxv4i1(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv4i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf2 +; CHECK: vmset.m {{v[0-9]+}} + %a = call <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1( + i64 %0) + + ret <vscale x 4 x i1> %a +} + +declare <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1( + i64); + +define <vscale x 8 x i1> @intrinsic_vmset_m_pseudo_nxv8i1(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv8i1 +; CHECK: vsetvli {{.*}}, a0, e8,m1 +; CHECK: vmset.m {{v[0-9]+}} + %a = call <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1( + i64 %0) + + ret <vscale x 8 x i1> %a +} + +declare <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1( + i64); + +define <vscale x 16 x i1> @intrinsic_vmset_m_pseudo_nxv16i1(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv16i1 +; CHECK: vsetvli {{.*}}, a0, e8,m2 +; CHECK: vmset.m {{v[0-9]+}} + %a = call <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1( + i64 %0) + + ret <vscale x 16 x i1> %a +} + +declare <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1( + i64); + +define <vscale x 32 x i1> @intrinsic_vmset_m_pseudo_nxv32i1(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv32i1 +; CHECK: vsetvli {{.*}}, a0, e8,m4 +; CHECK: vmset.m {{v[0-9]+}} + %a = call <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1( + i64 %0) + + ret <vscale x 32 x i1> %a +} + +declare <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1( + i64); + +define <vscale x 64 x i1> @intrinsic_vmset_m_pseudo_nxv64i1(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv64i1 +; CHECK: vsetvli {{.*}}, a0, e8,m8 +; CHECK: vmset.m {{v[0-9]+}} + %a = call <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1( + i64 %0) + + ret <vscale x 64 x i1> %a +}