diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -329,6 +329,25 @@ [IntrNoMem]>, RISCVVIntrinsic { let VLOperand = 2; } + + // For destination vector type is the same as source vector. + // Input: (passthru, vector_in, vl, policy) + class RISCVUnaryAAUnMaskedP + : Intrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty, + LLVMMatchType<1>], + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { + let VLOperand = 2; + } + + multiclass RISCVUnaryAAUnMaskedP { + if HasVV then + def "int_riscv_" # NAME # "_vv" : RISCVUnaryAAUnMaskedP; + + if HasVS then + def "int_riscv_" # NAME # "_vs" : RISCVUnaryAAUnMaskedP; + } + // For destination vector type is the same as first source vector (with mask). // Input: (vector_in, vector_in, mask, vl, policy) class RISCVUnaryAAMasked @@ -425,6 +444,14 @@ let ScalarOperand = 2; let VLOperand = 3; } + class RISCVBinaryAAXUnMaskedP + : Intrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty, + llvm_anyint_ty, LLVMMatchType<2>], + [ImmArg>, IntrNoMem]>, RISCVVIntrinsic { + let ScalarOperand = 2; + let VLOperand = 3; + } // For destination vector type is the same as first source vector (with mask). // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy) class RISCVBinaryAAXMasked @@ -1582,3 +1609,44 @@ def int_riscv_sm3p0 : ScalarCryptoGprIntrinsicAny; def int_riscv_sm3p1 : ScalarCryptoGprIntrinsicAny; } // TargetPrefix = "riscv" + +//===----------------------------------------------------------------------===// +// Vector Cryptography +// +// These intrinsics will lower directly into the corresponding instructions +// added by the vector cryptography extension, if the extension is present.
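+//
+// Illustrative use only (the nxv4i32 type, operand order, and policy value
+// are taken from the vaesef.ll test added later in this patch): an unmasked,
+// passthru-carrying call such as
+//
+//   %r = call <vscale x 4 x i32> @llvm.riscv.vaesef.vv.nxv4i32(
+//            <vscale x 4 x i32> %passthru, <vscale x 4 x i32> %vs2,
+//            iXLen %vl, iXLen 2)
+//
+// is expected to select to a single `vaesef.vv` under a tail-undisturbed,
+// mask-agnostic vsetvli (`tu, ma`, policy operand 2), as the CHECK lines in
+// the new test files verify.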
+let TargetPrefix = "riscv" in { + // zvkns + defm vaesdf : RISCVUnaryAAUnMaskedP; + defm vaesdm : RISCVUnaryAAUnMaskedP; + defm vaesef : RISCVUnaryAAUnMaskedP; + defm vaesem : RISCVUnaryAAUnMaskedP; + def int_riscv_vaeskf1 : RISCVBinaryAAXUnMaskedP; + def int_riscv_vaeskf2 : RISCVBinaryAAXUnMaskedP; + defm vaesz : RISCVUnaryAAUnMaskedP<0>; + + // zvkb + defm vandn : RISCVBinaryAAX; + defm vbrev8 : RISCVUnaryAA; + defm vclmul : RISCVBinaryAAX; + defm vclmulh : RISCVBinaryAAX; + defm vrev8 : RISCVUnaryAA; + defm vrol : RISCVBinaryAAX; + defm vror : RISCVBinaryAAX; + + // zvkg + def int_riscv_vghmac : RISCVBinaryAAXUnMaskedP; + + // zvksed + def int_riscv_vsm4k : RISCVBinaryAAXUnMaskedP; + defm vsm4r : RISCVUnaryAAUnMaskedP<1, 0>; + + // zvksh + def int_riscv_vsm3c : RISCVBinaryAAXUnMaskedP; + def int_riscv_vsm3me : RISCVBinaryAAXUnMaskedP; + + // zvknha or zvknhb + def int_riscv_vsha2ch : RISCVBinaryAAXUnMaskedP; + def int_riscv_vsha2cl : RISCVBinaryAAXUnMaskedP; + def int_riscv_vsha2ms : RISCVBinaryAAXUnMaskedP; +} // TargetPrefix = "riscv" diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -209,6 +209,22 @@ ValueType VectorM1 = VecM1; } +defset list I32I64IntegerVectors = { + defset list I32IntegerVectors = { + def : VTypeInfo; + def : VTypeInfo; + def : GroupVTypeInfo; + def : GroupVTypeInfo; + def : GroupVTypeInfo; + } + defset list I64IntegerVectors = { + def : VTypeInfo; + def : GroupVTypeInfo; + def : GroupVTypeInfo; + def : GroupVTypeInfo; + } +} + defset list AllVectors = { defset list AllIntegerVectors = { defset list NoGroupIntegerVectors = { @@ -1012,6 +1028,22 @@ let HasMergeOp = 1; } +class PPseudoUnaryNoMask : + Pseudo<(outs RetClass:$rd), + (ins RetClass:$merge, OpClass:$rs2, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasDummyMask = 1; + let HasVecPolicyOp = 1; + let HasMergeOp = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + class VPseudoUnaryMask : Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, OpClass:$rs2, @@ -1139,6 +1171,26 @@ let HasMergeOp = 1; } +class PPseudoBinaryNoMask : + Pseudo<(outs RetClass:$rd), + (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, + AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasDummyMask = 1; + let HasVecPolicyOp = 1; + let HasMergeOp = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + // Special version of VPseudoBinaryNoMask where we pretend the first source is // tied to the destination. // This allows maskedoff and rs2 to be the same register. 
@@ -1943,6 +1995,43 @@ defm _VV : VPseudoBinary; } +multiclass PPseudoBinaryNoMask { + let VLMul = MInfo.value in + def "_" # MInfo.MX : PPseudoBinaryNoMask; +} + +multiclass PPseudoBinaryV_VV_NoMask { + defm _VV : PPseudoBinaryNoMask; +} + +multiclass VPseudoUnaryV_V { + let VLMul = m.value in { + def "_V_" # m.MX : VPseudoUnaryNoMask; + + def "_V_" # m.MX # "_TU": VPseudoUnaryNoMaskTU; + + def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA, + RISCVMaskedPseudo; + } +} + +multiclass PPseudoUnaryV_V_NoMask { + let VLMul = m.value in { + def "_VV_" # m.MX : PPseudoUnaryNoMask; + } +} + +multiclass PPseudoUnaryV_S_NoMask { + let VLMul = m.value in { + def "_VS_" # m.MX : PPseudoUnaryNoMask; + } +} + // Similar to VPseudoBinaryV_VV, but uses MxListF. multiclass VPseudoBinaryFV_VV { foreach m = MxListF in @@ -2004,6 +2093,14 @@ defm _VI : VPseudoBinary; } +multiclass PPseudoBinaryV_VI { + defm _VI : VPseudoBinary; +} + +multiclass PPseudoBinaryV_VI_NoMask { + defm _VI : PPseudoBinaryNoMask; +} + multiclass VPseudoVALU_MM { foreach m = MxList in let VLMul = m.value in { @@ -2433,6 +2530,77 @@ } } +multiclass VPseudoVALU_V { + foreach m = MxList in { + defvar mx = m.MX; + defvar WriteVIALUV_MX = !cast("WriteVIALUV_" # mx); + defvar ReadVIALUV_MX = !cast("ReadVIALUV_" # mx); + + defm "" : VPseudoUnaryV_V, + Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>; + } +} + +multiclass PPseudoVALU_V_NoMask { + foreach m = MxListVF4 in { + defvar mx = m.MX; + defvar WriteVIALUV_MX = !cast("WriteVIALUV_" # mx); + defvar ReadVIALUV_MX = !cast("ReadVIALUV_" # mx); + + defm "" : PPseudoUnaryV_V_NoMask, + Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>; + } +} + +multiclass PPseudoVALU_S_NoMask { + foreach m = MxListVF4 in { + defvar mx = m.MX; + defvar WriteVIALUV_MX = !cast("WriteVIALUV_" # mx); + defvar ReadVIALUV_MX = !cast("ReadVIALUV_" # mx); + + defm "" : PPseudoUnaryV_S_NoMask, + Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>; + } +} + +multiclass PPseudoVALU_V_S_NoMask { + defm "" : PPseudoVALU_V_NoMask; + defm "" : PPseudoVALU_S_NoMask; +} + +multiclass PPseudoVALU_VV_NoMask { + foreach m = MxListVF4 in { + defvar mx = m.MX; + defvar WriteVIALUV_MX = !cast("WriteVIALUV_" # mx); + defvar ReadVIALUV_MX = !cast("ReadVIALUV_" # mx); + + defm "" : PPseudoBinaryV_VV_NoMask, + Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>; + } +} + +multiclass PPseudoVALU_VI { + foreach m = MxListVF4 in { + defvar mx = m.MX; + defvar WriteVIALUV_MX = !cast("WriteVIALUV_" # mx); + defvar ReadVIALUV_MX = !cast("ReadVIALUV_" # mx); + + defm "" : PPseudoBinaryV_VI, + Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>; + } +} + +multiclass PPseudoVALU_VI_NoMask { + foreach m = MxListVF4 in { + defvar mx = m.MX; + defvar WriteVIALUV_MX = !cast("WriteVIALUV_" # mx); + defvar ReadVIALUV_MX = !cast("ReadVIALUV_" # mx); + + defm "" : PPseudoBinaryV_VI_NoMask, + Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>; + } +} + multiclass VPseudoVSALU_VV_VX { defm "" : VPseudoBinaryV_VV, Sched<[WriteVSALUV, ReadVSALUV, ReadVSALUV, ReadVMask]>; @@ -2533,6 +2701,21 @@ } } +multiclass VPseudoVALU_VCLMUL { + foreach m = MxList in { + defvar mx = m.MX; + defvar WriteVIALUV_MX = !cast("WriteVIALUV_" # mx); + defvar WriteVIALUX_MX = !cast("WriteVIALUX_" # mx); + defvar ReadVIALUV_MX = !cast("ReadVIALUV_" # mx); + defvar ReadVIALUX_MX = !cast("ReadVIALUX_" # mx); + + defm _VV : VPseudoBinary, + Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX,
ReadVMask]>; + defm _VX : VPseudoBinary, + Sched<[WriteVIALUX_MX, ReadVIALUV_MX, ReadVIALUX_MX, ReadVMask]>; + } +} + multiclass VPseudoVSGNJ_VV_VF { defm "" : VPseudoBinaryFV_VV, Sched<[WriteVFSgnjV, ReadVFSgnjV, ReadVFSgnjV, ReadVMask]>; @@ -3369,6 +3552,24 @@ (op2_type op2_reg_class:$rs2), GPR:$vl, sew)>; +class PPatUnaryNoMask : + Pat<(result_type (!cast(intrinsic_name) + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + VLOpFrag, (XLenVT timm:$policy))), + (!cast(inst#"_"#kind#"_"#vlmul.MX) + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + GPR:$vl, sew, (XLenVT timm:$policy))>; + class VPatUnaryMask; +class PPatBinaryNoMask : + Pat<(result_type (!cast(intrinsic_name) + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + VLOpFrag, (XLenVT timm:$policy))), + (!cast(inst) + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + GPR:$vl, sew, (XLenVT timm:$policy))>; + // Same as above but source operands are swapped. class VPatBinaryNoMaskSwapped vtilist> { + foreach vti = vtilist in + def : PPatUnaryNoMask; +} + +multiclass PPatUnaryV_S_NoMask vtilist> { + foreach vti = vtilist in + def : PPatUnaryNoMask; +} + +multiclass PPatUnaryV_V_S_NoMask vtilist> { + defm : PPatUnaryV_V_NoMask; + defm : PPatUnaryV_S_NoMask; +} + multiclass VPatNullaryV { foreach vti = AllIntegerVectors in { @@ -4014,6 +4257,15 @@ vti.RegClass, vti.RegClass>; } +multiclass VPatBinaryV_VV_NoMask vtilist> { + foreach vti = vtilist in + def : PPatBinaryNoMask; +} + multiclass VPatBinaryV_VV_INT vtilist> { foreach vti = vtilist in { @@ -4073,6 +4325,14 @@ vti.RegClass, imm_type>; } +multiclass VPatBinaryV_VI_NoMask vtilist, Operand imm_type = simm5> { + foreach vti = vtilist in + def : PPatBinaryNoMask; +} + multiclass VPatBinaryM_MM { foreach mti = AllMasks in def : VPatBinaryM; + defm PseudoVAESKF2 : PPseudoVALU_VI_NoMask; + defm PseudoVAESZ : PPseudoVALU_S_NoMask; +} // Predicates = [HasStdExtZvkns] + +let Predicates = [HasStdExtZvkb] in { + defm PseudoVANDN : VPseudoVALU_VV_VX_VI; + defm PseudoVBREV8 : VPseudoVALU_V; + defm PseudoVCLMUL : VPseudoVALU_VCLMUL; + defm PseudoVCLMULH : VPseudoVALU_VCLMUL; + defm PseudoVREV8 : VPseudoVALU_V; + defm PseudoVROL : VPseudoVALU_VV_VX_VI; + defm PseudoVROR : VPseudoVALU_VV_VX_VI; +} // Predicates = [HasStdExtZvkb] + +let Predicates = [HasStdExtZvkg] in { + defm PseudoVGHMAC : PPseudoVALU_VV_NoMask; +} // Predicates = [HasStdExtZvkg] + +let Predicates = [HasStdExtZvksed] in { + defm PseudoVSM4K : PPseudoVALU_VI_NoMask; + defm PseudoVSM4R : PPseudoVALU_V_NoMask; +} // Predicates = [HasStdExtZvksed] + +let Predicates = [HasStdExtZvksh] in { + defm PseudoVSM3C : PPseudoVALU_VI_NoMask; + defm PseudoVSM3ME : PPseudoVALU_VV_NoMask; +} // Predicates = [HasStdExtZvksh] + +let Predicates = [HasStdExtZvknhaOrZvknhb] in { + defm PseudoVSHA2CH : PPseudoVALU_VV_NoMask; + defm PseudoVSHA2CL : PPseudoVALU_VV_NoMask; + defm PseudoVSHA2MS : PPseudoVALU_VV_NoMask; +} // Predicates = [HasStdExtZvknhaOrZvknhb] + //===----------------------------------------------------------------------===// // Patterns. 
//===----------------------------------------------------------------------===// @@ -6007,6 +6307,46 @@ defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>; } // Predicates = [HasVInstructionsAnyF] +let Predicates = [HasStdExtZvkns] in { + defm : PPatUnaryV_V_S_NoMask<"int_riscv_vaesdf", "PseudoVAESDF", I32IntegerVectors>; + defm : PPatUnaryV_V_S_NoMask<"int_riscv_vaesdm", "PseudoVAESDM", I32IntegerVectors>; + defm : PPatUnaryV_V_S_NoMask<"int_riscv_vaesef", "PseudoVAESEF", I32IntegerVectors>; + defm : PPatUnaryV_V_S_NoMask<"int_riscv_vaesem", "PseudoVAESEM", I32IntegerVectors>; + defm : VPatBinaryV_VI_NoMask<"int_riscv_vaeskf1", "PseudoVAESKF1", I32IntegerVectors, uimm5>; + defm : VPatBinaryV_VI_NoMask<"int_riscv_vaeskf2", "PseudoVAESKF2", I32IntegerVectors, uimm5>; + defm : PPatUnaryV_S_NoMask<"int_riscv_vaesz", "PseudoVAESZ", I32IntegerVectors>; +} // Predicates = [HasStdExtZvkns] + +let Predicates = [HasStdExtZvkb] in { + defm : VPatBinaryV_VV_VX_VI<"int_riscv_vandn", "PseudoVANDN", AllIntegerVectors>; + defm : VPatUnaryV_V<"int_riscv_vbrev8", "PseudoVBREV8", AllIntegerVectors>; + defm : VPatBinaryV_VV_VX<"int_riscv_vclmul", "PseudoVCLMUL", I64IntegerVectors>; + defm : VPatBinaryV_VV_VX<"int_riscv_vclmulh", "PseudoVCLMULH", I64IntegerVectors>; + defm : VPatUnaryV_V<"int_riscv_vrev8", "PseudoVREV8", AllIntegerVectors>; + defm : VPatBinaryV_VV_VX_VI<"int_riscv_vrol", "PseudoVROL", AllIntegerVectors>; + defm : VPatBinaryV_VV_VX_VI<"int_riscv_vror", "PseudoVROR", AllIntegerVectors>; +} // Predicates = [HasStdExtZvkb] + +let Predicates = [HasStdExtZvkg] in { + defm : VPatBinaryV_VV_NoMask<"int_riscv_vghmac", "PseudoVGHMAC", I32IntegerVectors>; +} // Predicates = [HasStdExtZvkg] + +let Predicates = [HasStdExtZvksed] in { + defm : VPatBinaryV_VI_NoMask<"int_riscv_vsm4k", "PseudoVSM4K", I32IntegerVectors, uimm5>; + defm : PPatUnaryV_V_NoMask<"int_riscv_vsm4r", "PseudoVSM4R", I32IntegerVectors>; +} // Predicates = [HasStdExtZvksed] + +let Predicates = [HasStdExtZvksh] in { + defm : VPatBinaryV_VI_NoMask<"int_riscv_vsm3c", "PseudoVSM3C", I32IntegerVectors, uimm5>; + defm : VPatBinaryV_VV_NoMask<"int_riscv_vsm3me", "PseudoVSM3ME", I32IntegerVectors>; +} // Predicates = [HasStdExtZvksh] + +let Predicates = [HasStdExtZvknhaOrZvknhb] in { + defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ch", "PseudoVSHA2CH", I32I64IntegerVectors>; + defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2cl", "PseudoVSHA2CL", I32I64IntegerVectors>; + defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ms", "PseudoVSHA2MS", I32I64IntegerVectors>; +} // Predicates = [HasStdExtZvknhaOrZvknhb] + // Include the non-intrinsic ISel patterns include "RISCVInstrInfoVVLPatterns.td" include "RISCVInstrInfoVSDPatterns.td" diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesdf.ll b/llvm/test/CodeGen/RISCV/rvv/vaesdf.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vaesdf.ll @@ -0,0 +1,205 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvkns \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkns \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vaesdf.vv.nxv1i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vv_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdf_vv_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu,
ma +; CHECK-NEXT: vaesdf.vv v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vv.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdf.vv.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vv_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdf_vv_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaesdf.vv v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vv.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdf.vv.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vv_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdf_vv_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaesdf.vv v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vv.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdf.vv.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vv_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdf_vv_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaesdf.vv v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vv.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdf.vv.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vv_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdf_vv_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaesdf.vv v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vv.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdf.vs.nxv1i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vs_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdf_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaesdf.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vs.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdf.vs.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vs_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdf_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaesdf.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vs.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdf.vs.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vs_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdf_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaesdf.vs v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vs.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdf.vs.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vs_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdf_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaesdf.vs v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vs.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdf.vs.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vs_nxv16i32( %0, %1, iXLen %2) nounwind 
{ +; CHECK-LABEL: intrinsic_vaesdf_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaesdf.vs v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vs.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesdm.ll b/llvm/test/CodeGen/RISCV/rvv/vaesdm.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vaesdm.ll @@ -0,0 +1,205 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvkns \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkns \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vaesdm.vv.nxv1i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vv_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vv_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaesdm.vv v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vv.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdm.vv.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vv_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vv_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaesdm.vv v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vv.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdm.vv.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vv_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vv_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaesdm.vv v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vv.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdm.vv.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vv_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vv_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaesdm.vv v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vv.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdm.vv.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vv_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vv_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaesdm.vv v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vv.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdm.vs.nxv1i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vs_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaesdm.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vs.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdm.vs.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vs_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: 
vaesdm.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vs.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdm.vs.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vs_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaesdm.vs v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vs.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdm.vs.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vs_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaesdm.vs v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vs.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdm.vs.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vs_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaesdm.vs v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vs.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesef.ll b/llvm/test/CodeGen/RISCV/rvv/vaesef.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vaesef.ll @@ -0,0 +1,205 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvkns \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkns \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vaesef.vv.nxv1i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vv_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vv_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaesef.vv v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vv.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesef.vv.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vv_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vv_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaesef.vv v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vv.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesef.vv.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vv_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vv_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaesef.vv v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vv.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesef.vv.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vv_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vv_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaesef.vv v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vv.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare 
@llvm.riscv.vaesef.vv.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vv_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vv_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaesef.vv v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vv.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesef.vs.nxv1i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vs_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaesef.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vs.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesef.vs.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vs_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaesef.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vs.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesef.vs.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vs_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaesef.vs v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vs.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesef.vs.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vs_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaesef.vs v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vs.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesef.vs.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vs_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaesef.vs v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vs.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesem.ll b/llvm/test/CodeGen/RISCV/rvv/vaesem.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vaesem.ll @@ -0,0 +1,205 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvkns \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkns \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vaesem.vv.nxv1i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vv_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesem_vv_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaesem.vv v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vv.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesem.vv.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vv_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vaesem_vv_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaesem.vv v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vv.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesem.vv.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vv_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesem_vv_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaesem.vv v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vv.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesem.vv.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vv_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesem_vv_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaesem.vv v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vv.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesem.vv.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vv_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesem_vv_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaesem.vv v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vv.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesem.vs.nxv1i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vs_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesem_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaesem.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vs.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesem.vs.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vs_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesem_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaesem.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vs.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesem.vs.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vs_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesem_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaesem.vs v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vs.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesem.vs.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vs_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesem_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaesem.vs v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vs.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesem.vs.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vs_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesem_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaesem.vs v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vs.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vaeskf1.ll b/llvm/test/CodeGen/RISCV/rvv/vaeskf1.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vaeskf1.ll @@ -0,0 +1,125 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvkns \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkns \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vaeskf1.nxv1i32.i32( + , + , + i32, + iXLen, + iXLen) + +define @intrinsic_vaeskf1_vi_nxv1i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaeskf1_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaeskf1.vi v8, v9, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf1.nxv1i32.i32( + %0, + %1, + i32 2, + iXLen %2, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaeskf1.nxv2i32.i32( + , + , + i32, + iXLen, + iXLen) + +define @intrinsic_vaeskf1_vi_nxv2i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaeskf1_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaeskf1.vi v8, v9, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf1.nxv2i32.i32( + %0, + %1, + i32 2, + iXLen %2, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaeskf1.nxv4i32.i32( + , + , + i32, + iXLen, + iXLen) + +define @intrinsic_vaeskf1_vi_nxv4i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaeskf1_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaeskf1.vi v8, v10, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf1.nxv4i32.i32( + %0, + %1, + i32 2, + iXLen %2, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaeskf1.nxv8i32.i32( + , + , + i32, + iXLen, + iXLen) + +define @intrinsic_vaeskf1_vi_nxv8i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaeskf1_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaeskf1.vi v8, v12, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf1.nxv8i32.i32( + %0, + %1, + i32 2, + iXLen %2, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaeskf1.nxv16i32.i32( + , + , + i32, + iXLen, + iXLen) + +define @intrinsic_vaeskf1_vi_nxv16i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaeskf1_vi_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaeskf1.vi v8, v16, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf1.nxv16i32.i32( + %0, + %1, + i32 2, + iXLen %2, + iXLen 2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vaeskf2.ll b/llvm/test/CodeGen/RISCV/rvv/vaeskf2.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vaeskf2.ll @@ -0,0 +1,125 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvkns \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkns \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vaeskf2.nxv1i32.i32( + , + , + i32, + iXLen, + iXLen) + +define @intrinsic_vaeskf2_vi_nxv1i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaeskf2_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaeskf2.vi v8, v9, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf2.nxv1i32.i32( + %0, + %1, + i32 2, + iXLen %2, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaeskf2.nxv2i32.i32( + , + , + i32, + iXLen, + iXLen) + +define @intrinsic_vaeskf2_vi_nxv2i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaeskf2_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaeskf2.vi v8, v9, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf2.nxv2i32.i32( + %0, + %1, + i32 2, + iXLen %2, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaeskf2.nxv4i32.i32( + , + , + i32, + iXLen, + iXLen) + +define @intrinsic_vaeskf2_vi_nxv4i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaeskf2_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaeskf2.vi v8, v10, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf2.nxv4i32.i32( + %0, + %1, + i32 2, + iXLen %2, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaeskf2.nxv8i32.i32( + , + , + i32, + iXLen, + iXLen) + +define @intrinsic_vaeskf2_vi_nxv8i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaeskf2_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaeskf2.vi v8, v12, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf2.nxv8i32.i32( + %0, + %1, + i32 2, + iXLen %2, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaeskf2.nxv16i32.i32( + , + , + i32, + iXLen, + iXLen) + +define @intrinsic_vaeskf2_vi_nxv16i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaeskf2_vi_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaeskf2.vi v8, v16, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf2.nxv16i32.i32( + %0, + %1, + i32 2, + iXLen %2, + iXLen 2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesz.ll b/llvm/test/CodeGen/RISCV/rvv/vaesz.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vaesz.ll @@ -0,0 +1,105 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvkns \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkns \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vaesz.vs.nxv1i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesz_vs_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesz_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaesz.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesz.vs.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesz.vs.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesz_vs_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesz_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaesz.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesz.vs.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesz.vs.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesz_vs_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesz_vs_nxv4i32: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaesz.vs v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesz.vs.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesz.vs.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesz_vs_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesz_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaesz.vs v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesz.vs.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesz.vs.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesz_vs_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesz_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaesz.vs v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesz.vs.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn.ll b/llvm/test/CodeGen/RISCV/rvv/vandn.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vandn.ll @@ -0,0 +1,2899 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvkb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 + +declare @llvm.riscv.vandn.nxv1i8.nxv1i8( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i8.nxv1i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv2i8.nxv2i8( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i8.nxv2i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv2i8.nxv2i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv4i8.nxv4i8( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv4i8_nxv4i8: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i8.nxv4i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv4i8.nxv4i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv8i8.nxv8i8( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv8i8.nxv8i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv8i8.nxv8i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv16i8.nxv16i8( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv16i8.nxv16i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv16i8.nxv16i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vandn.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv32i8.nxv32i8( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv32i8.nxv32i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv32i8.nxv32i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vandn.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv64i8.nxv64i8( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv64i8_nxv64i8( 
%0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv64i8.nxv64i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv64i8.nxv64i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8r.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vandn.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv1i16.nxv1i16( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i16.nxv1i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv1i16.nxv1i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv2i16.nxv2i16( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i16.nxv2i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv2i16.nxv2i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv4i16.nxv4i16( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i16.nxv4i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv4i16.nxv4i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + 
iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv8i16.nxv8i16( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv8i16.nxv8i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv8i16.nxv8i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vandn.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv16i16.nxv16i16( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv16i16.nxv16i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv16i16.nxv16i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vandn.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv32i16.nxv32i16( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv32i16.nxv32i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv32i16.nxv32i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vandn.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv1i32.nxv1i32( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i32.nxv1i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv1i32.nxv1i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv2i32.nxv2i32( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i32.nxv2i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv2i32.nxv2i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv4i32.nxv4i32( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i32.nxv4i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv4i32.nxv4i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vandn.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv8i32.nxv8i32( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv8i32.nxv8i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv8i32.nxv8i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vandn.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv16i32.nxv16i32( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv16i32.nxv16i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv16i32.nxv16i32( + , + , + , + , + iXLen, + iXLen) + +define 
@intrinsic_vandn_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vandn.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv1i64.nxv1i64( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i64.nxv1i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv1i64.nxv1i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv2i64.nxv2i64( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i64.nxv2i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv2i64.nxv2i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vandn.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv4i64.nxv4i64( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i64.nxv4i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv4i64.nxv4i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vandn.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv8i64.nxv8i64( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vandn.nxv8i64.nxv8i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv8i64.nxv8i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vandn.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv1i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vandn_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv1i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vandn.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv2i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vandn_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv2i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vandn.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv4i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vandn_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv4i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vandn.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv8i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vandn_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; 
CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv8i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv8i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vandn.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv16i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vandn_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv16i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv16i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vandn.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv32i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vandn_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv32i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv32i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vandn.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv64i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vandn_vx_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv64i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv64i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vandn.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv1i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vandn_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, 
a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv1i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vandn.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv2i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vandn_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv2i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vandn.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv4i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vandn_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv4i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vandn.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv8i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vandn_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv8i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv8i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vandn.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv16i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vandn_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv16i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv16i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vandn.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv32i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vandn_vx_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv32i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv32i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vandn.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv1i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vandn_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv1i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vandn.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv2i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vandn_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv2i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vandn.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv4i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vandn_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) 
nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv4i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vandn.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv8i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vandn_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv8i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv8i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vandn.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv16i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vandn_vx_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv16i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv16i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vandn.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv1i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vandn_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vandn_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vandn.vv v8, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vandn_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vandn.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv1i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; 
RV32-LABEL: intrinsic_vandn_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vandn.vv v8, v9, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vandn_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vandn.vx v8, v9, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv2i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vandn_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vandn_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vandn.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vandn_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vandn.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv2i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vandn_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vandn.vv v8, v10, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vandn_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vandn.vx v8, v10, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv4i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vandn_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vandn_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vandn.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vandn_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vandn.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv4i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vandn_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: 
vandn.vv v8, v12, v16, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vandn_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vandn.vx v8, v12, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv8i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vandn_vx_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vandn_vx_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vandn.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vandn_vx_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vandn.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv8i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv8i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vandn_mask_vx_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vandn.vv v8, v16, v24, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vandn_mask_vx_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vandn.vx v8, v16, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv8i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv1i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vandn.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv2i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vandn.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv4i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: 
intrinsic_vandn_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vandn.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv8i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv8i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vandn.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv8i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv16i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv16i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vandn.vi v8, v10, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv16i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv32i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv32i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vandn.vi v8, v12, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv32i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv64i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv64i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv64i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vandn.vi v8, v16, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv64i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define 
@intrinsic_vandn_vi_nxv1i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vandn.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv2i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vandn.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv4i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vandn.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv8i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv8i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vandn.vi v8, v10, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv8i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv16i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv16i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vandn.vi v8, v12, 2, v0.t +; CHECK-NEXT: 
ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv16i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv32i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv32i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv32i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vandn.vi v8, v16, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv32i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv1i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vandn.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv2i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vandn.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv4i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vandn.vi v8, v10, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv8i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv8i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv8i32_i32: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vandn.vi v8, v12, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv8i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv16i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv16i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv16i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vandn.vi v8, v16, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv16i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv1i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i64.i64( + undef, + %0, + i64 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vandn.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i64.i64( + %0, + %1, + i64 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv2i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i64.i64( + undef, + %0, + i64 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vandn.vi v8, v10, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i64.i64( + %0, + %1, + i64 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv4i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i64.i64( + undef, + %0, + i64 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vandn_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vandn.vi v8, v12, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i64.i64( + %0, + %1, + i64 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vandn_vi_nxv8i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vandn_vi_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vandn.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv8i64.i64( + undef, + %0, + i64 2, + iXLen %1) + + ret %a +} + 
+define @intrinsic_vandn_mask_vi_nxv8i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vi_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vandn.vi v8, v16, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv8i64.i64( + %0, + %1, + i64 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vbrev8.ll b/llvm/test/CodeGen/RISCV/rvv/vbrev8.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vbrev8.ll @@ -0,0 +1,951 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvkb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vbrev8.nxv1i8( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv1i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv1i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv1i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vbrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv1i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv2i8( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv2i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv2i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv2i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv2i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vbrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv2i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv4i8( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv4i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv4i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv4i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv4i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vbrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv4i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv8i8( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv8i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i8: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv8i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv8i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vbrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv8i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv16i8( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv16i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv16i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv16i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vbrev8.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv16i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv32i8( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv32i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv32i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv32i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vbrev8.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv32i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv64i8( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv64i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv64i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv64i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vbrev8.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv64i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv1i16( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv1i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv1i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv1i16( + , + 
, + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vbrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv1i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv2i16( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv2i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv2i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vbrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv2i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv4i16( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv4i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv4i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vbrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv4i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv8i16( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv8i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv8i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vbrev8.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv8i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv16i16( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv16i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv16i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; 
CHECK-NEXT: vbrev8.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv16i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv32i16( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv32i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv32i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv32i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vbrev8.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv32i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv1i32( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vbrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv1i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv2i32( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vbrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv2i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv4i32( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vbrev8.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv4i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv8i32( + , + , + iXLen); + +define 
@intrinsic_vbrev8_vs_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv8i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vbrev8.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv8i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv16i32( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv16i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv16i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vbrev8.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv16i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv1i64( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv1i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv1i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vbrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv1i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv2i64( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv2i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv2i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vbrev8.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv2i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv4i64( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = 
call @llvm.riscv.vbrev8.nxv4i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv4i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vbrev8.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv4i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv8i64( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv8i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv8i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vbrev8.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv8i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vclmul.ll b/llvm/test/CodeGen/RISCV/rvv/vclmul.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vclmul.ll @@ -0,0 +1,478 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvkb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 + +declare @llvm.riscv.vclmul.nxv1i64.nxv1i64( + , + , + , + iXLen) + +define @intrinsic_vclmul_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vclmul_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vclmul.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.nxv1i64.nxv1i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vclmul_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vclmul.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmul.nxv2i64.nxv2i64( + , + , + , + iXLen) + +define @intrinsic_vclmul_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vclmul_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vclmul.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.nxv2i64.nxv2i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vclmul_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, 
m2, ta, mu +; CHECK-NEXT: vclmul.vv v8, v10, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmul.nxv4i64.nxv4i64( + , + , + , + iXLen) + +define @intrinsic_vclmul_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vclmul_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vclmul.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.nxv4i64.nxv4i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vclmul_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vclmul.vv v8, v12, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmul.nxv8i64.nxv8i64( + , + , + , + iXLen) + +define @intrinsic_vclmul_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vclmul_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vclmul.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.nxv8i64.nxv8i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vclmul_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vclmul.vv v8, v16, v24 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmul.nxv1i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vclmul_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vclmul_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vclmul.vv v8, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmul_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vclmul.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.nxv1i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmul.mask.nxv1i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vclmul_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vclmul.vv v8, v9, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vclmul.vx v8, 
v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmul.nxv2i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vclmul_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vclmul_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vclmul.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmul_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vclmul.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.nxv2i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmul.mask.nxv2i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vclmul_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vclmul.vv v8, v10, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vclmul.vx v8, v10, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmul.nxv4i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vclmul_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vclmul_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vclmul.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmul_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vclmul.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.nxv4i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmul.mask.nxv4i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vclmul_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vclmul.vv v8, v12, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vclmul.vx v8, v12, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmul.nxv8i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vclmul_vx_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind 
{ +; RV32-LABEL: intrinsic_vclmul_vx_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vclmul.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmul_vx_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vclmul.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.nxv8i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmul.mask.nxv8i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vclmul_mask_vx_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vclmul.vv v8, v16, v24 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vclmul.vx v8, v16, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.mask.nxv8i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vclmulh.ll b/llvm/test/CodeGen/RISCV/rvv/vclmulh.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vclmulh.ll @@ -0,0 +1,478 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvkb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 + +declare @llvm.riscv.vclmulh.nxv1i64.nxv1i64( + , + , + , + iXLen) + +define @intrinsic_vclmulh_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vclmulh_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vclmulh.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.nxv1i64.nxv1i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vclmulh_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vclmulh.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmulh.nxv2i64.nxv2i64( + , + , + , + iXLen) + +define @intrinsic_vclmulh_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vclmulh_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vclmulh.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.nxv2i64.nxv2i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64( + , + , + , + , + iXLen, + iXLen) + +define 
@intrinsic_vclmulh_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vclmulh.vv v8, v10, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmulh.nxv4i64.nxv4i64( + , + , + , + iXLen) + +define @intrinsic_vclmulh_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vclmulh_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vclmulh.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.nxv4i64.nxv4i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vclmulh_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vclmulh.vv v8, v12, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmulh.nxv8i64.nxv8i64( + , + , + , + iXLen) + +define @intrinsic_vclmulh_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vclmulh_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vclmulh.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.nxv8i64.nxv8i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vclmulh_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vclmulh.vv v8, v16, v24 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmulh.nxv1i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vclmulh_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vclmulh_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vclmulh.vv v8, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmulh_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vclmulh.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.nxv1i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmulh.mask.nxv1i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vclmulh_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; 
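; On riscv32 the i64 scalar operand arrives in a GPR pair (a0/a1), so the expected code
; stores it to the stack and splats it with a zero-stride vlse64.v; the .vx intrinsic is
; therefore checked against the .vv instruction on RV32, while RV64 keeps vclmulh.vx.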
RV32-NEXT: vclmulh.vv v8, v9, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vclmulh.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmulh.nxv2i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vclmulh_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vclmulh_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vclmulh.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmulh_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vclmulh.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.nxv2i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmulh.mask.nxv2i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vclmulh_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vclmulh.vv v8, v10, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vclmulh.vx v8, v10, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmulh.nxv4i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vclmulh_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vclmulh_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vclmulh.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmulh_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vclmulh.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.nxv4i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmulh.mask.nxv4i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vclmulh_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vclmulh.vv v8, v12, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vclmulh.vx v8, v12, a0 +; 
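; All of the masked tests pass a policy operand of 1 (tail agnostic, mask undisturbed),
; which is what the "ta, mu" in the expected vsetvli encodes; the merge value stays live
; in v8 and the vector source is taken from a higher register group.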
RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmulh.nxv8i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vclmulh_vx_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vclmulh_vx_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vclmulh.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmulh_vx_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vclmulh.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.nxv8i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmulh.mask.nxv8i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vclmulh_mask_vx_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vclmulh.vv v8, v16, v24 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vclmulh.vx v8, v16, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.mask.nxv8i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vghmac.ll b/llvm/test/CodeGen/RISCV/rvv/vghmac.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vghmac.ll @@ -0,0 +1,126 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvkg \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkg \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vghmac.nxv1i32.nxv1i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vghmac_vv_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vghmac_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vghmac.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vghmac.nxv1i32.nxv1i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vghmac.nxv2i32.nxv2i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vghmac_vv_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vghmac_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vghmac.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vghmac.nxv2i32.nxv2i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vghmac.nxv4i32.nxv4i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vghmac_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vghmac_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vghmac.vv 
v8, v10, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vghmac.nxv4i32.nxv4i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vghmac.nxv8i32.nxv8i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vghmac_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vghmac_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vghmac.vv v8, v12, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vghmac.nxv8i32.nxv8i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vghmac.nxv16i32.nxv16i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vghmac_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vghmac_vv_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma +; CHECK-NEXT: vghmac.vv v8, v16, v24 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vghmac.nxv16i32.nxv16i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vrev8.ll b/llvm/test/CodeGen/RISCV/rvv/vrev8.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vrev8.ll @@ -0,0 +1,951 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvkb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vrev8.nxv1i8( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv1i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv1i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv1i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv1i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv2i8( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv2i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv2i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv2i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv2i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv2i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv4i8( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv4i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + 
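; The unmasked tests pass undef as the passthru, so no merge constraint is required: the
; expected vsetvli uses the "ta, ma" form and vrev8.v is free to overwrite its source
; in place (v8, v8).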
%a = call @llvm.riscv.vrev8.nxv4i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv4i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv4i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv4i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv8i8( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv8i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv8i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv8i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv8i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv16i8( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv16i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv16i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv16i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vrev8.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv16i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv32i8( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv32i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv32i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv32i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vrev8.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv32i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv64i8( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv64i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv64i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv64i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv64i8: +; CHECK: # %bb.0: # %entry +; 
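; In the masked tests the source operand sits one full register group above the merge
; value in v8 (v9 up to LMUL=1, then v10, v12 and v16 as LMUL grows), so the source and
; the merge value never overlap.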
CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vrev8.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv64i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv1i16( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv1i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv1i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv1i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv1i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv2i16( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv2i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv2i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv2i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv4i16( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv4i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv4i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv4i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv8i16( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv8i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv8i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vrev8.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv8i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv16i16( + , + , + iXLen); + +define 
@intrinsic_vrev8_vs_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv16i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv16i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vrev8.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv16i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv32i16( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv32i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv32i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv32i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vrev8.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv32i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv1i32( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv1i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv2i32( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv2i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv4i32( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv4i32( + 
undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vrev8.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv4i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv8i32( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv8i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vrev8.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv8i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv16i32( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv16i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv16i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vrev8.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv16i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv1i64( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv1i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv1i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv1i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv2i64( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv2i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv2i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vrev8.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv2i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv4i64( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv4i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv4i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vrev8.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv4i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv8i64( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv8i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv8i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vrev8.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv8i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vrol.ll b/llvm/test/CodeGen/RISCV/rvv/vrol.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vrol.ll @@ -0,0 +1,2899 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvkb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 + +declare @llvm.riscv.vrol.nxv1i8.nxv1i8( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i8.nxv1i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv2i8.nxv2i8( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vrol.vv 
v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i8.nxv2i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv2i8.nxv2i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv4i8.nxv4i8( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i8.nxv4i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv4i8.nxv4i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv8i8.nxv8i8( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv8i8.nxv8i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv8i8.nxv8i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv16i8.nxv16i8( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv16i8.nxv16i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv16i8.nxv16i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vrol.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv32i8.nxv32i8( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, 
ma +; CHECK-NEXT: vrol.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv32i8.nxv32i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv32i8.nxv32i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vrol.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv64i8.nxv64i8( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv64i8.nxv64i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv64i8.nxv64i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8r.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vrol.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv1i16.nxv1i16( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i16.nxv1i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv1i16.nxv1i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv2i16.nxv2i16( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i16.nxv2i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv2i16.nxv2i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv4i16.nxv4i16( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vrol_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i16.nxv4i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv4i16.nxv4i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv8i16.nxv8i16( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv8i16.nxv8i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv8i16.nxv8i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vrol.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv16i16.nxv16i16( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv16i16.nxv16i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv16i16.nxv16i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vrol.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv32i16.nxv32i16( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv32i16.nxv32i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv32i16.nxv32i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vrol.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.vrol.nxv1i32.nxv1i32( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i32.nxv1i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv1i32.nxv1i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv2i32.nxv2i32( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i32.nxv2i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv2i32.nxv2i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv4i32.nxv4i32( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i32.nxv4i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv4i32.nxv4i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vrol.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv8i32.nxv8i32( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv8i32.nxv8i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv8i32.nxv8i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vrol.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vrol.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv16i32.nxv16i32( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv16i32.nxv16i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv16i32.nxv16i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vrol.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv1i64.nxv1i64( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i64.nxv1i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv1i64.nxv1i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv2i64.nxv2i64( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i64.nxv2i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv2i64.nxv2i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vrol.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv4i64.nxv4i64( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i64.nxv4i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv4i64.nxv4i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vrol.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv8i64.nxv8i64( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv8i64.nxv8i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv8i64.nxv8i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vrol.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv1i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vrol_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv1i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vrol.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv2i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vrol_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv2i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vrol.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv4i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vrol_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv4i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: 
# %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vrol.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv8i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vrol_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv8i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv8i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vrol.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv16i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vrol_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv16i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv16i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vrol.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv32i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vrol_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv32i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv32i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vrol.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv64i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vrol_vx_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv64i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv64i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, 
e8, m8, ta, mu +; CHECK-NEXT: vrol.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv1i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vrol_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv1i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vrol.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv2i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vrol_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv2i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vrol.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv4i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vrol_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv4i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vrol.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv8i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vrol_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv8i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv8i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vrol.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv16i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vrol_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv16i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv16i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vrol.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv32i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vrol_vx_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv32i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv32i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vrol.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv1i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vrol_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv1i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vrol.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv2i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vrol_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv2i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vrol_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vrol.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv4i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vrol_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv4i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vrol.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv8i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vrol_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv8i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv8i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vrol.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv16i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vrol_vx_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv16i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv16i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vrol.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv1i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vrol_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vrol_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vrol.vv v8, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrol_vx_nxv1i64_i64: +; RV64: # %bb.0: # 
%entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vrol.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv1i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vrol_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vrol.vv v8, v9, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrol_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vrol.vx v8, v9, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv2i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vrol_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vrol_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vrol.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrol_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vrol.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv2i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vrol_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vrol.vv v8, v10, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrol_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vrol.vx v8, v10, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv4i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vrol_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vrol_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vrol.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrol_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vrol.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv4i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define 
@intrinsic_vrol_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vrol_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vrol.vv v8, v12, v16, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrol_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vrol.vx v8, v12, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv8i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vrol_vx_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vrol_vx_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vrol.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrol_vx_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vrol.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv8i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv8i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vrol_mask_vx_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vrol.vv v8, v16, v24, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrol_mask_vx_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vrol.vx v8, v16, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv8i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv1i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vrol.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv2i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv2i8_i8: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vrol.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv4i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vrol.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv8i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv8i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vrol.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv8i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv16i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv16i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vrol.vi v8, v10, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv16i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv32i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv32i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vrol.vi v8, v12, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv32i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv64i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv64i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv64i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vrol_mask_vi_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vrol.vi v8, v16, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv64i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv1i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vrol.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv2i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vrol.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv4i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vrol.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv8i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv8i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vrol.vi v8, v10, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv8i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv16i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv16i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define 
@intrinsic_vrol_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vrol.vi v8, v12, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv16i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv32i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv32i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv32i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vrol.vi v8, v16, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv32i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv1i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vrol.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv2i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vrol.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv4i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vrol.vi v8, v10, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv8i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = 
call @llvm.riscv.vrol.nxv8i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vrol.vi v8, v12, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv8i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv16i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv16i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv16i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vrol.vi v8, v16, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv16i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv1i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i64.i64( + undef, + %0, + i64 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vrol.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i64.i64( + %0, + %1, + i64 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv2i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i64.i64( + undef, + %0, + i64 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vrol.vi v8, v10, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i64.i64( + %0, + %1, + i64 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv4i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vrol.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i64.i64( + undef, + %0, + i64 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vrol_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vrol.vi v8, v12, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i64.i64( + %0, + %1, + i64 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vrol_vi_nxv8i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrol_vi_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vrol.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrol.nxv8i64.i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    i64 2,
+    iXLen %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vrol_mask_vi_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv8i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vrol.vi v8, v16, 2, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 2,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vror.ll b/llvm/test/CodeGen/RISCV/rvv/vror.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vror.ll
@@ -0,0 +1,2899 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvkb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvkb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen)
+
+define <vscale x 1 x i8> @intrinsic_vror_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i8> @intrinsic_vror_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  iXLen)
+
+define <vscale x 2 x i8> @intrinsic_vror_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i8> @intrinsic_vror_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vror.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  iXLen)
+
+define <vscale x 4 x i8> @intrinsic_vror_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vror.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i8> @intrinsic_vror_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vror.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv8i8.nxv8i8( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i8.nxv8i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv8i8.nxv8i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vror.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv16i8.nxv16i8( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv16i8.nxv16i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv16i8.nxv16i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vror.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv32i8.nxv32i8( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv32i8.nxv32i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv32i8.nxv32i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vror.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv64i8.nxv64i8( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv64i8.nxv64i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv64i8.nxv64i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vror_mask_vv_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8r.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vror.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv1i16.nxv1i16( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i16.nxv1i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv1i16.nxv1i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vror.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv2i16.nxv2i16( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i16.nxv2i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv2i16.nxv2i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vror.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv4i16.nxv4i16( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i16.nxv4i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv4i16.nxv4i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vror.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv8i16.nxv8i16( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i16.nxv8i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv8i16.nxv8i16( + , + , + , + , + iXLen, + 
iXLen) + +define @intrinsic_vror_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vror.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv16i16.nxv16i16( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv16i16.nxv16i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv16i16.nxv16i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vror.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv32i16.nxv32i16( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv32i16.nxv32i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv32i16.nxv32i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vror.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv1i32.nxv1i32( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i32.nxv1i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv1i32.nxv1i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vror.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv2i32.nxv2i32( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vror.nxv2i32.nxv2i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv2i32.nxv2i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vror.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv4i32.nxv4i32( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i32.nxv4i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv4i32.nxv4i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vror.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv8i32.nxv8i32( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i32.nxv8i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv8i32.nxv8i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vror.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv16i32.nxv16i32( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv16i32.nxv16i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv16i32.nxv16i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vror.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv1i64.nxv1i64( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i64.nxv1i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv1i64.nxv1i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vror.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv2i64.nxv2i64( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i64.nxv2i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv2i64.nxv2i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vror.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv4i64.nxv4i64( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i64.nxv4i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv4i64.nxv4i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vror.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv8i64.nxv8i64( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i64.nxv8i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv8i64.nxv8i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vror.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv1i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vror_vx_nxv1i8_i8( 
%0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv1i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vror.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv2i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vror_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv2i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vror.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv4i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vror_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv4i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vror.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv8i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vror_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv8i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vror.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv16i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vror_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vror_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv16i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv16i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vror.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv32i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vror_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv32i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv32i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vror.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv64i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vror_vx_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv64i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv64i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vror.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv1i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vror_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv1i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vror.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv2i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vror_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vror_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv2i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vror.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv4i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vror_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv4i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vror.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv8i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vror_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv8i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vror.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv16i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vror_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv16i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv16i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vror.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv32i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vror_vx_nxv32i16_i16( %0, i16 
%1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv32i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv32i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vror.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv1i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vror_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv1i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vror.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv2i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vror_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv2i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vror.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv4i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vror_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv4i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vror.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv8i32.i32( + , + , + i32, + iXLen) + +define 
@intrinsic_vror_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv8i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vror.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv16i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vror_vx_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv16i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv16i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vror.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv1i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vror_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vror_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vror.vv v8, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vror_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vror.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv1i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vror_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vror.vv v8, v9, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vror_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vror.vx v8, v9, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv2i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vror_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vror_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; 
RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vror.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vror_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vror.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv2i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vror_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vror.vv v8, v10, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vror_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vror.vx v8, v10, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv4i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vror_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vror_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vror.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vror_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vror.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv4i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vror_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vror.vv v8, v12, v16, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vror_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vror.vx v8, v12, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv8i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vror_vx_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vror_vx_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vror.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: 
intrinsic_vror_vx_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vror.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv8i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vror_mask_vx_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vror.vv v8, v16, v24, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vror_mask_vx_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vror.vx v8, v16, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv1i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vror.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv2i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vror.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv4i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vror.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv8i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i8.i8( + undef, + %0, + i8 2, + 
iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vror.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv16i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv16i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vror.vi v8, v10, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv16i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv32i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv32i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vror.vi v8, v12, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv32i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv64i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv64i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv64i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vror.vi v8, v16, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv64i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv1i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vror.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv2i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vror.nxv2i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vror.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv4i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vror.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv8i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vror.vi v8, v10, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv16i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv16i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vror.vi v8, v12, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv16i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv32i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv32i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv32i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vror.vi v8, v16, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv32i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv1i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vror.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv2i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vror.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv4i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vror.vi v8, v10, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv8i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vror.vi v8, v12, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv16i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv16i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv16i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vror.vi v8, v16, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv16i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv1i64_i64( %0, iXLen %1) nounwind { +; 
CHECK-LABEL: intrinsic_vror_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i64.i64( + undef, + %0, + i64 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vror.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i64.i64( + %0, + %1, + i64 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv2i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i64.i64( + undef, + %0, + i64 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vror.vi v8, v10, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i64.i64( + %0, + %1, + i64 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv4i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i64.i64( + undef, + %0, + i64 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vror.vi v8, v12, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i64.i64( + %0, + %1, + i64 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv8i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i64.i64( + undef, + %0, + i64 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv8i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vror.vi v8, v16, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i64.i64( + %0, + %1, + i64 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsha2ch.ll b/llvm/test/CodeGen/RISCV/rvv/vsha2ch.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsha2ch.ll @@ -0,0 +1,223 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvknha \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvknha \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vsha2ch.nxv1i32.nxv1i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ch_vv_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { 
+; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ch.nxv1i32.nxv1i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ch.nxv2i32.nxv2i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ch_vv_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ch.nxv2i32.nxv2i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ch.nxv4i32.nxv4i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ch_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v10, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ch.nxv4i32.nxv4i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ch.nxv8i32.nxv8i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ch_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v12, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ch.nxv8i32.nxv8i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ch.nxv16i32.nxv16i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ch_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v16, v24 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ch.nxv16i32.nxv16i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ch.nxv1i64.nxv1i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ch_vv_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ch.nxv1i64.nxv1i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ch.nxv2i64.nxv2i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ch_vv_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v10, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ch.nxv2i64.nxv2i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ch.nxv4i64.nxv4i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ch_vv_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v12, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ch.nxv4i64.nxv4i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) 
+ + ret %a +} + +declare @llvm.riscv.vsha2ch.nxv8i64.nxv8i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ch_vv_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v16, v24 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ch.nxv8i64.nxv8i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll b/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll @@ -0,0 +1,223 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvknhb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvknhb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vsha2cl.nxv1i32.nxv1i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2cl_vv_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2cl.nxv1i32.nxv1i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2cl.nxv2i32.nxv2i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2cl_vv_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2cl.nxv2i32.nxv2i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2cl.nxv4i32.nxv4i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2cl_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v10, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2cl.nxv4i32.nxv4i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2cl.nxv8i32.nxv8i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2cl_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v12, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2cl.nxv8i32.nxv8i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2cl.nxv16i32.nxv16i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2cl_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v16, v24 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2cl.nxv16i32.nxv16i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2cl.nxv1i64.nxv1i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2cl_vv_nxv1i64_nxv1i64( %0, %1, %2, 
iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2cl.nxv1i64.nxv1i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2cl.nxv2i64.nxv2i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2cl_vv_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v10, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2cl.nxv2i64.nxv2i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2cl.nxv4i64.nxv4i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2cl_vv_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v12, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2cl.nxv4i64.nxv4i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2cl.nxv8i64.nxv8i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2cl_vv_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v16, v24 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2cl.nxv8i64.nxv8i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsha2ms.ll b/llvm/test/CodeGen/RISCV/rvv/vsha2ms.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsha2ms.ll @@ -0,0 +1,223 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvknha,+zvknhb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvknha,+zvknhb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vsha2ms.nxv1i32.nxv1i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ms_vv_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsha2ms.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ms.nxv1i32.nxv1i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ms.nxv2i32.nxv2i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ms_vv_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsha2ms.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ms.nxv2i32.nxv2i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ms.nxv4i32.nxv4i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ms_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsha2ms.vv v8, 
v10, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ms.nxv4i32.nxv4i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ms.nxv8i32.nxv8i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ms_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsha2ms.vv v8, v12, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ms.nxv8i32.nxv8i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ms.nxv16i32.nxv16i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ms_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma +; CHECK-NEXT: vsha2ms.vv v8, v16, v24 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ms.nxv16i32.nxv16i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ms.nxv1i64.nxv1i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ms_vv_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsha2ms.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ms.nxv1i64.nxv1i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ms.nxv2i64.nxv2i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ms_vv_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsha2ms.vv v8, v10, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ms.nxv2i64.nxv2i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ms.nxv4i64.nxv4i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ms_vv_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsha2ms.vv v8, v12, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ms.nxv4i64.nxv4i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ms.nxv8i64.nxv8i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ms_vv_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma +; CHECK-NEXT: vsha2ms.vv v8, v16, v24 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ms.nxv8i64.nxv8i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm3c.ll b/llvm/test/CodeGen/RISCV/rvv/vsm3c.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsm3c.ll @@ -0,0 +1,125 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvksh \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvksh \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare 
<vscale x 1 x i32> @llvm.riscv.vsm3c.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i32> @intrinsic_vsm3c_vi_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm3c_vi_nxv1i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsm3c.vi v8, v9, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsm3c.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsm3c.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i32> @intrinsic_vsm3c_vi_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm3c_vi_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsm3c.vi v8, v9, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsm3c.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsm3c.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vsm3c_vi_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm3c_vi_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsm3c.vi v8, v10, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsm3c.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsm3c.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vsm3c_vi_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm3c_vi_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsm3c.vi v8, v12, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsm3c.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsm3c.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vsm3c_vi_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm3c_vi_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsm3c.vi v8, v16, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsm3c.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm3me.ll b/llvm/test/CodeGen/RISCV/rvv/vsm3me.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsm3me.ll
@@ -0,0 +1,126 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvksh \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvksh \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i32> @llvm.riscv.vsm3me.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i32> @intrinsic_vsm3me_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsm3me_vv_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsm3me.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsm3me.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsm3me.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i32> @intrinsic_vsm3me_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsm3me_vv_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsm3me.vv v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsm3me.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsm3me.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vsm3me_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsm3me_vv_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsm3me.vv v8, v10, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsm3me.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsm3me.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vsm3me_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsm3me_vv_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsm3me.vv v8, v12, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsm3me.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsm3me.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vsm3me_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsm3me_vv_nxv16i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    vsm3me.vv v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsm3me.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm4k.ll b/llvm/test/CodeGen/RISCV/rvv/vsm4k.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsm4k.ll
@@ -0,0 +1,125 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvksed \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvksed \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i32> @llvm.riscv.vsm4k.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i32> @intrinsic_vsm4k_vi_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm4k_vi_nxv1i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsm4k.vi v8, v9, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsm4k.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsm4k.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i32> @intrinsic_vsm4k_vi_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm4k_vi_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsm4k.vi v8, v9, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsm4k.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsm4k.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vsm4k_vi_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm4k_vi_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsm4k.vi v8, v10, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsm4k.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsm4k.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vsm4k_vi_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm4k_vi_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsm4k.vi v8, v12, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsm4k.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsm4k.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vsm4k_vi_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm4k_vi_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsm4k.vi v8, v16, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsm4k.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm4r.ll b/llvm/test/CodeGen/RISCV/rvv/vsm4r.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsm4r.ll
@@ -0,0 +1,105 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvksed \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvksed \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i32> @llvm.riscv.vsm4r.vv.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vsm4r_vv_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm4r_vv_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsm4r.vv v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsm4r.vv.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsm4r.vv.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vsm4r_vv_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm4r_vv_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsm4r.vv v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsm4r.vv.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsm4r.vv.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vsm4r_vv_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm4r_vv_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsm4r.vv v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsm4r.vv.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsm4r.vv.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vsm4r_vv_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm4r_vv_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsm4r.vv v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsm4r.vv.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsm4r.vv.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vsm4r_vv_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm4r_vv_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsm4r.vv v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsm4r.vv.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}