diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -313,6 +313,23 @@
                     [IntrNoMem]>, RISCVVIntrinsic {
     let VLOperand = 2;
   }
+  // For destination vector type is the same as the source vector type
+  // Input: (passthru, vector_in, vl, policy)
+  class RISCVUnaryAAUnMaskedZvk<bit IsVS>
+        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                                [LLVMMatchType<0>, !if(IsVS, llvm_anyvector_ty, LLVMMatchType<0>),
+                                 llvm_anyint_ty, !if(IsVS, LLVMMatchType<2>, LLVMMatchType<1>)],
+                                [ImmArg<ArgIndex<3>>, IntrNoMem]>, RISCVVIntrinsic {
+    let VLOperand = 2;
+  }
+
+  multiclass RISCVUnaryAAUnMaskedZvk<bit HasVV = true, bit HasVS = true> {
+    if HasVV then
+      def "int_riscv_" # NAME # "_vv" : RISCVUnaryAAUnMaskedZvk<IsVS=0>;
+
+    if HasVS then
+      def "int_riscv_" # NAME # "_vs" : RISCVUnaryAAUnMaskedZvk<IsVS=1>;
+  }
   // For destination vector type is the same as first source vector (with mask).
   // Input: (vector_in, vector_in, mask, vl, policy)
   class RISCVUnaryAAMasked
@@ -401,11 +418,25 @@
   }
   // For destination vector type is the same as first source vector.
   // Input: (passthru, vector_in, vector_in/scalar_in, vl)
-  class RISCVBinaryAAXUnMasked
+  class RISCVBinaryAAXUnMasked<bit IsVI = 0>
       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                               [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
                                llvm_anyint_ty],
-                              [IntrNoMem]>, RISCVVIntrinsic {
+                              !listconcat([IntrNoMem],
+                                          !if(IsVI, [ImmArg<ArgIndex<2>>], []))>,
+        RISCVVIntrinsic {
+    let ScalarOperand = 2;
+    let VLOperand = 3;
+  }
+  // For destination vector type is the same as the source vector type.
+  // Input: (passthru, vector_in, vector_in/scalar_in, vl, policy)
+  class RISCVBinaryAAXUnMaskedZvk<bit IsVI = 0>
+      : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                              [LLVMMatchType<0>, LLVMMatchType<0>,
+                               llvm_any_ty, llvm_anyint_ty, LLVMMatchType<2>],
+                              !listconcat([ImmArg<ArgIndex<4>>, IntrNoMem],
+                                          !if(IsVI, [ImmArg<ArgIndex<2>>], []))>,
+        RISCVVIntrinsic {
     let ScalarOperand = 2;
     let VLOperand = 3;
   }
@@ -1663,6 +1694,54 @@
 } // TargetPrefix = "riscv"

 //===----------------------------------------------------------------------===//
+// Vector Cryptography
+//
+// These intrinsics will lower directly into the corresponding instructions
+// added by the vector cryptography extension, if the extension is present.
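A minimal sketch of the lowering described above (illustrative only, not part of the patch: the function name is made up, while the call shape, policy operand, and emitted instructions mirror the vaesdf.ll test added later in this diff, assuming riscv64 with +v,+experimental-zvkned):

    ; The unmasked Zvkned intrinsic carries (passthru, vector_in, vl, policy);
    ; policy 2 requests tail-undisturbed/mask-agnostic, hence "tu, ma" below.
    declare <vscale x 4 x i32> @llvm.riscv.vaesdf.vv.nxv4i32(
      <vscale x 4 x i32>, <vscale x 4 x i32>, i64, i64)

    define <vscale x 4 x i32> @lower_vaesdf(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i64 %vl) {
      %r = call <vscale x 4 x i32> @llvm.riscv.vaesdf.vv.nxv4i32(
                  <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i64 %vl, i64 2)
      ; llc -mtriple=riscv64 -mattr=+v,+experimental-zvkned selects:
      ;   vsetvli zero, a0, e32, m2, tu, ma
      ;   vaesdf.vv v8, v10
      ret <vscale x 4 x i32> %r
    }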
+let TargetPrefix = "riscv" in { + // Zvbb + defm vandn : RISCVBinaryAAX; + defm vbrev : RISCVUnaryAA; + defm vbrev8 : RISCVUnaryAA; + defm vrev8 : RISCVUnaryAA; + defm vclz : RISCVUnaryAA; + defm vctz : RISCVUnaryAA; + defm vcpopv : RISCVUnaryAA; + defm vrol : RISCVBinaryAAX; + defm vror : RISCVBinaryAAX; + defm vwsll : RISCVBinaryABX; + + // Zvbc + defm vclmul : RISCVBinaryAAX; + defm vclmulh : RISCVBinaryAAX; + + // Zvkg + def int_riscv_vghsh : RISCVBinaryAAXUnMaskedZvk; + def int_riscv_vgmul_vv : RISCVUnaryAAUnMaskedZvk; + + // Zvkned + defm vaesdf : RISCVUnaryAAUnMaskedZvk; + defm vaesdm : RISCVUnaryAAUnMaskedZvk; + defm vaesef : RISCVUnaryAAUnMaskedZvk; + defm vaesem : RISCVUnaryAAUnMaskedZvk; + def int_riscv_vaeskf1 : RISCVBinaryAAXUnMasked; + def int_riscv_vaeskf2 : RISCVBinaryAAXUnMaskedZvk; + defm vaesz : RISCVUnaryAAUnMaskedZvk; + + // Zvknha or Zvknhb + def int_riscv_vsha2ch : RISCVBinaryAAXUnMaskedZvk; + def int_riscv_vsha2cl : RISCVBinaryAAXUnMaskedZvk; + def int_riscv_vsha2ms : RISCVBinaryAAXUnMaskedZvk; + + // Zvksed + def int_riscv_vsm4k : RISCVBinaryAAXUnMasked; + defm vsm4r : RISCVUnaryAAUnMaskedZvk; + + // Zvksh + def int_riscv_vsm3c : RISCVBinaryAAXUnMaskedZvk; + def int_riscv_vsm3me : RISCVBinaryAAXUnMasked; +} // TargetPrefix = "riscv" + // Vendor extensions //===----------------------------------------------------------------------===// include "llvm/IR/IntrinsicsRISCVXTHead.td" diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td @@ -230,6 +230,18 @@ let OperandNamespace = "RISCVOp"; } +def timm5 : Operand, TImmLeaf(Imm);}]> { + let ParserMatchClass = SImmAsmOperand<5>; + let EncoderMethod = "getImmOpValue"; + let DecoderMethod = "decodeSImmOperand<5>"; + let MCOperandPredicate = [{ + int64_t Imm; + if (MCOp.evaluateAsConstantImm(Imm)) + return isInt<5>(Imm); + return MCOp.isBareSymbolRef(); + }]; +} + def InsnDirectiveOpcode : AsmOperandClass { let Name = "InsnDirectiveOpcode"; let ParserMethod = "parseInsnDirectiveOpcode"; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -189,7 +189,7 @@ // Use for zext/sext.vf2 defvar MxListVF2 = [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8]; -// Use for zext/sext.vf4 +// Use for zext/sext.vf4 and vector crypto instructions defvar MxListVF4 = [V_MF2, V_M1, V_M2, V_M4, V_M8]; // Use for zext/sext.vf8 @@ -358,6 +358,10 @@ } } +defvar I32IntegerVectors = !filter(vti, AllIntegerVectors, !eq(vti.SEW, 32)); +defvar I32I64IntegerVectors = !filter(vti, AllIntegerVectors, + !or(!eq(vti.SEW, 32), !eq(vti.SEW, 64))); + // This functor is used to obtain the int vector type that has the same SEW and // multiplier as the input parameter type class GetIntVTypeInfo @@ -1054,6 +1058,20 @@ let HasSEWOp = 1; } +class VPseudoUnaryNoMask_Zvk : + Pseudo<(outs RetClass:$rd), + (ins RetClass:$merge, OpClass:$rs2, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasVecPolicyOp = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + class VPseudoUnaryMask : Pseudo<(outs GetVRegNoV0.R:$rd), (ins GetVRegNoV0.R:$merge, OpClass:$rs2, @@ -1222,6 +1240,24 @@ let HasRoundModeOp = 
1; } +class VPseudoBinaryNoMask_Zvk : + Pseudo<(outs RetClass:$rd), + (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, + AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret; + let HasVLOp = 1; + let HasSEWOp = 1; + let HasVecPolicyOp = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + // Special version of VPseudoBinaryNoMask where we pretend the first source is // tied to the destination. // This allows maskedoff and rs2 to be the same register. @@ -2076,6 +2112,43 @@ defm _VV : VPseudoBinaryRoundingMode; } +multiclass VPseudoBinaryNoMask_Zvk { + let VLMul = MInfo.value in + def "_" # MInfo.MX : VPseudoBinaryNoMask_Zvk; +} + +multiclass VPseudoBinaryV_VV_NoMask_Zvk { + defm _VV : VPseudoBinaryNoMask_Zvk; +} + +multiclass VPseudoUnaryV_V { + let VLMul = m.value in { + def "_V_" # m.MX : VPseudoUnaryNoMask; + + def "_V_" # m.MX # "_TU": VPseudoUnaryNoMaskTU; + + def "_V_" # m.MX # "_MASK" : VPseudoUnaryMask, + RISCVMaskedPseudo; + } +} + +multiclass VPseudoUnaryV_V_NoMask_Zvk { + let VLMul = m.value in { + def "_VV_" # m.MX : VPseudoUnaryNoMask_Zvk; + } +} + +multiclass VPseudoUnaryV_S_NoMask_Zvk { + let VLMul = m.value in { + def "_VS_" # m.MX : VPseudoUnaryNoMask_Zvk; + } +} + // Similar to VPseudoBinaryV_VV, but uses MxListF. multiclass VPseudoBinaryFV_VV { defm _VV : VPseudoBinary; @@ -2152,6 +2225,10 @@ defm _VI : VPseudoBinaryRoundingMode; } +multiclass VPseudoBinaryV_VI_NoMask_Zvk { + defm _VI : VPseudoBinaryNoMask_Zvk; +} + multiclass VPseudoVALU_MM { foreach m = MxList in { defvar mx = m.MX; @@ -2182,6 +2259,11 @@ "@earlyclobber $rd">; } +multiclass VPseudoBinaryW_VI { + defm "_VI" : VPseudoBinary; +} + multiclass VPseudoBinaryW_VF { defm "_V" # f.FX : VPseudoBinary { + foreach m = MxList in { + defvar mx = m.MX; + defvar WriteVIALUV_MX = !cast("WriteVIALUV_" # mx); + defvar ReadVIALUV_MX = !cast("ReadVIALUV_" # mx); + + defm "" : VPseudoUnaryV_V, + Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>; + } +} + +multiclass VPseudoVALU_V_NoMask_Zvk { + foreach m = MxListVF4 in { + defvar mx = m.MX; + defvar WriteVIALUV_MX = !cast("WriteVIALUV_" # mx); + defvar ReadVIALUV_MX = !cast("ReadVIALUV_" # mx); + + defm "" : VPseudoUnaryV_V_NoMask_Zvk, + Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>; + } +} + +multiclass VPseudoVALU_S_NoMask_Zvk { + foreach m = MxListVF4 in { + defvar mx = m.MX; + defvar WriteVIALUV_MX = !cast("WriteVIALUV_" # mx); + defvar ReadVIALUV_MX = !cast("ReadVIALUV_" # mx); + + defm "" : VPseudoUnaryV_S_NoMask_Zvk, + Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>; + } +} + +multiclass VPseudoVALU_V_S_NoMask_Zvk { + defm "" : VPseudoVALU_V_NoMask_Zvk; + defm "" : VPseudoVALU_S_NoMask_Zvk; +} + +multiclass VPseudoVALU_VV_NoMask_Zvk { + foreach m = MxListVF4 in { + defvar mx = m.MX; + defvar WriteVIALUV_MX = !cast("WriteVIALUV_" # mx); + defvar ReadVIALUV_MX = !cast("ReadVIALUV_" # mx); + + defm "" : VPseudoBinaryV_VV_NoMask_Zvk, + Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>; + } +} + +multiclass VPseudoVALU_VI_NoMask_Zvk { + foreach m = MxListVF4 in { + defvar mx = m.MX; + defvar WriteVIALUV_MX = !cast("WriteVIALUV_" # mx); + defvar ReadVIALUV_MX = !cast("ReadVIALUV_" # mx); + + defm "" : VPseudoBinaryV_VI_NoMask_Zvk, + Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>; + } +} + +multiclass VPseudoVALU_VI_NoMaskTU_Zvk { + foreach m = 
MxListVF4 in { + defvar mx = m.MX; + defvar WriteVIALUV_MX = !cast("WriteVIALUV_" # mx); + defvar ReadVIALUV_MX = !cast("ReadVIALUV_" # mx); + + def "_VI_" # m.MX : VPseudoBinaryNoMask, + Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>; + def "_VI_" # m.MX # "_TU" : VPseudoBinaryNoMaskTU, + Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>; + } +} + +multiclass VPseudoVALU_VV_NoMaskTU_Zvk { + foreach m = MxListVF4 in { + defvar mx = m.MX; + defvar WriteVIALUV_MX = !cast("WriteVIALUV_" # mx); + defvar ReadVIALUV_MX = !cast("ReadVIALUV_" # mx); + + def "_VV_" # m.MX : VPseudoBinaryNoMask, + Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>; + def "_VV_" # m.MX # "_TU" : VPseudoBinaryNoMaskTU, + Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>; + } +} multiclass VPseudoVSALU_VV_VX { foreach m = MxList in { defvar mx = m.MX; @@ -2786,6 +2953,21 @@ } } +multiclass VPseudoVCLMUL_VV_VX { + foreach m = MxList in { + defvar mx = m.MX; + defvar WriteVIALUV_MX = !cast("WriteVIALUV_" # mx); + defvar WriteVIALUX_MX = !cast("WriteVIALUV_" # mx); + defvar ReadVIALUV_MX = !cast("ReadVIALUV_" # mx); + defvar ReadVIALUX_MX = !cast("ReadVIALUX_" # mx); + + defm "" : VPseudoBinaryV_VV, + Sched<[WriteVIALUV_MX, ReadVIALUV_MX, ReadVIALUV_MX, ReadVMask]>; + defm "" : VPseudoBinaryV_VX, + Sched<[WriteVIALUX_MX, ReadVIALUV_MX, ReadVIALUX_MX, ReadVMask]>; + } +} + multiclass VPseudoVSGNJ_VV_VF { foreach m = MxListF in { defvar mx = m.MX; @@ -2898,6 +3080,24 @@ } } +multiclass VPseudoVWALU_VV_VX_VI { + foreach m = MxListW in { + defvar mx = m.MX; + defvar WriteVIWALUV_MX = !cast("WriteVIWALUV_" # mx); + defvar WriteVIWALUX_MX = !cast("WriteVIWALUX_" # mx); + defvar WriteVIWALUI_MX = !cast("WriteVIWALUI_" # mx); + defvar ReadVIWALUV_MX = !cast("ReadVIWALUV_" # mx); + defvar ReadVIWALUX_MX = !cast("ReadVIWALUX_" # mx); + + defm "" : VPseudoBinaryW_VV, + Sched<[WriteVIWALUV_MX, ReadVIWALUV_MX, ReadVIWALUV_MX, ReadVMask]>; + defm "" : VPseudoBinaryW_VX, + Sched<[WriteVIWALUX_MX, ReadVIWALUV_MX, ReadVIWALUX_MX, ReadVMask]>; + defm "" : VPseudoBinaryW_VI, + Sched<[WriteVIWALUI_MX, ReadVIWALUV_MX, ReadVMask]>; + } +} + multiclass VPseudoVWMUL_VV_VX { foreach m = MxListW in { defvar mx = m.MX; @@ -3968,6 +4168,24 @@ (op2_type op2_reg_class:$rs2), GPR:$vl, log2sew)>; +class VPatUnaryNoMask_Zvk : + Pat<(result_type (!cast(intrinsic_name) + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + VLOpFrag, (XLenVT timm:$policy))), + (!cast(inst#"_"#kind#"_"#vlmul.MX) + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + GPR:$vl, sew, (XLenVT timm:$policy))>; + class VPatUnaryMask vtilist> { + foreach vti = vtilist in + def : VPatUnaryNoMask_Zvk; +} + +multiclass VPatUnaryV_S_NoMaskVectorCtypto vtilist> { + foreach vti = vtilist in { + defvar vectorM1 = !cast("VI" # vti.SEW # "M1"); + def : VPatUnaryNoMask_Zvk; + } +} + +multiclass VPatUnaryV_V_S_NoMask_Zvk vtilist> { + defm : VPatUnaryV_V_NoMask_Zvk; + defm : VPatUnaryV_S_NoMaskVectorCtypto; +} + multiclass VPatNullaryV { foreach vti = AllIntegerVectors in { @@ -4610,6 +4852,22 @@ op2_kind>; } +multiclass VPatBinaryTANoMask +{ + def : VPatBinaryNoMaskTA; + def : VPatBinaryNoMaskTU; +} + multiclass VPatBinarySwapped vtilist> { + foreach vti = vtilist in + def : VPatTernaryNoMaskWithPolicy; +} + multiclass VPatBinaryV_VV_INT_EEW vtilist> { foreach vti = vtilist in { @@ -4849,6 +5116,31 @@ vti.RegClass, imm_type>; } +multiclass VPatBinaryV_VI_NoMask vtilist, Operand imm_type = 
timm5> { + foreach vti = vtilist in + def : VPatTernaryNoMaskWithPolicy; +} + +multiclass VPatBinaryV_VI_NoMaskTA vtilist, Operand imm_type = timm5> { + foreach vti = vtilist in + defm : VPatBinaryTANoMask; +} + +multiclass VPatBinaryV_VV_NoMaskTA vtilist> { + foreach vti = vtilist in + defm : VPatBinaryTANoMask; +} + multiclass VPatBinaryM_MM { foreach mti = AllMasks in let Predicates = [HasVInstructions] in @@ -4886,6 +5178,18 @@ } } +multiclass VPatBinaryW_VI vtilist> { + foreach VtiToWti = vtilist in { + defvar Vti = VtiToWti.Vti; + defvar Wti = VtiToWti.Wti; + defm : VPatBinaryTA; + } +} + multiclass VPatBinaryW_WV vtilist> { foreach VtiToWti = vtilist in { @@ -5188,6 +5492,12 @@ : VPatBinaryW_VV, VPatBinaryW_VX; +multiclass VPatBinaryW_VV_VX_VI vtilist> + : VPatBinaryW_VV, + VPatBinaryW_VX, + VPatBinaryW_VI; + multiclass VPatBinaryW_WV_WX vtilist> : VPatBinaryW_WV, @@ -6352,6 +6662,58 @@ //===----------------------------------------------------------------------===// defm PseudoVCOMPRESS : VPseudoVCPR_V; +//===----------------------------------------------------------------------===// +// Vector Crypto Instruction +//===----------------------------------------------------------------------===// +let Predicates = [HasStdExtZvbb] in { + defm PseudoVANDN : VPseudoVALU_VV_VX; + defm PseudoVBREV : VPseudoVALU_V; + defm PseudoVBREV8 : VPseudoVALU_V; + defm PseudoVREV8 : VPseudoVALU_V; + defm PseudoVCLZ : VPseudoVALU_V; + defm PseudoVCTZ : VPseudoVALU_V; + defm PseudoVCPOP : VPseudoVALU_V; + defm PseudoVROL : VPseudoVALU_VV_VX; + defm PseudoVROR : VPseudoVALU_VV_VX_VI; + defm PseudoVWSLL : VPseudoVWALU_VV_VX_VI; +} // Predicates = [HasStdExtZvbb] + +let Predicates = [HasStdExtZvbc] in { + defm PseudoVCLMUL : VPseudoVCLMUL_VV_VX; + defm PseudoVCLMULH : VPseudoVCLMUL_VV_VX; +} // Predicates = [HasStdExtZvbc] + +let Predicates = [HasStdExtZvkg] in { + defm PseudoVGHSH : VPseudoVALU_VV_NoMask_Zvk; + defm PseudoVGMUL : VPseudoVALU_V_NoMask_Zvk; +} // Predicates = [HasStdExtZvkg] + +let Predicates = [HasStdExtZvkned] in { + defm PseudoVAESDF : VPseudoVALU_V_S_NoMask_Zvk; + defm PseudoVAESDM : VPseudoVALU_V_S_NoMask_Zvk; + defm PseudoVAESEF : VPseudoVALU_V_S_NoMask_Zvk; + defm PseudoVAESEM : VPseudoVALU_V_S_NoMask_Zvk; + defm PseudoVAESKF1 : VPseudoVALU_VI_NoMaskTU_Zvk; + defm PseudoVAESKF2 : VPseudoVALU_VI_NoMask_Zvk; + defm PseudoVAESZ : VPseudoVALU_S_NoMask_Zvk; +} // Predicates = [HasStdExtZvkned] + +let Predicates = [HasStdExtZvknha] in { + defm PseudoVSHA2CH : VPseudoVALU_VV_NoMask_Zvk; + defm PseudoVSHA2CL : VPseudoVALU_VV_NoMask_Zvk; + defm PseudoVSHA2MS : VPseudoVALU_VV_NoMask_Zvk; +} // Predicates = [HasStdExtZvknha] + +let Predicates = [HasStdExtZvksed] in { + defm PseudoVSM4K : VPseudoVALU_VI_NoMaskTU_Zvk; + defm PseudoVSM4R : VPseudoVALU_V_S_NoMask_Zvk; +} // Predicates = [HasStdExtZvksed] + +let Predicates = [HasStdExtZvksh] in { + defm PseudoVSM3C : VPseudoVALU_VI_NoMask_Zvk; + defm PseudoVSM3ME : VPseudoVALU_VV_NoMaskTU_Zvk; +} // Predicates = [HasStdExtZvksh] + //===----------------------------------------------------------------------===// // Patterns. 
//===----------------------------------------------------------------------===// @@ -6963,6 +7325,58 @@ defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>; defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllFloatVectors>; +//===----------------------------------------------------------------------===// +// Vector Crypto Instruction +//===----------------------------------------------------------------------===// +let Predicates = [HasStdExtZvbb] in { + defm : VPatBinaryV_VV_VX<"int_riscv_vandn", "PseudoVANDN", AllIntegerVectors>; + defm : VPatUnaryV_V<"int_riscv_vbrev", "PseudoVBREV", AllIntegerVectors>; + defm : VPatUnaryV_V<"int_riscv_vbrev8", "PseudoVBREV8", AllIntegerVectors>; + defm : VPatUnaryV_V<"int_riscv_vrev8", "PseudoVREV8", AllIntegerVectors>; + defm : VPatUnaryV_V<"int_riscv_vclz", "PseudoVCLZ", AllIntegerVectors>; + defm : VPatUnaryV_V<"int_riscv_vctz", "PseudoVCTZ", AllIntegerVectors>; + defm : VPatUnaryV_V<"int_riscv_vcpopv", "PseudoVCPOP", AllIntegerVectors>; + defm : VPatBinaryV_VV_VX<"int_riscv_vrol", "PseudoVROL", AllIntegerVectors>; + defm : VPatBinaryV_VV_VX_VI<"int_riscv_vror", "PseudoVROR", AllIntegerVectors>; + defm : VPatBinaryW_VV_VX_VI<"int_riscv_vwsll", "PseudoVWSLL", AllWidenableIntVectors>; +} // Predicates = [HasStdExtZvbb] + +let Predicates = [HasStdExtZvbc] in { + defm : VPatBinaryV_VV_VX<"int_riscv_vclmul", "PseudoVCLMUL", I64IntegerVectors>; + defm : VPatBinaryV_VV_VX<"int_riscv_vclmulh", "PseudoVCLMULH", I64IntegerVectors>; +} // Predicates = [HasStdExtZvbc] + +let Predicates = [HasStdExtZvkg] in { + defm : VPatBinaryV_VV_NoMask<"int_riscv_vghsh", "PseudoVGHSH", I32IntegerVectors>; + defm : VPatUnaryV_V_NoMask_Zvk<"int_riscv_vgmul", "PseudoVGMUL", I32IntegerVectors>; +} // Predicates = [HasStdExtZvkg] + +let Predicates = [HasStdExtZvkned] in { + defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesdf", "PseudoVAESDF", I32IntegerVectors>; + defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesdm", "PseudoVAESDM", I32IntegerVectors>; + defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesef", "PseudoVAESEF", I32IntegerVectors>; + defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vaesem", "PseudoVAESEM", I32IntegerVectors>; + defm : VPatBinaryV_VI_NoMaskTA<"int_riscv_vaeskf1", "PseudoVAESKF1", I32IntegerVectors>; + defm : VPatBinaryV_VI_NoMask<"int_riscv_vaeskf2", "PseudoVAESKF2", I32IntegerVectors>; + defm : VPatUnaryV_S_NoMaskVectorCtypto<"int_riscv_vaesz", "PseudoVAESZ", I32IntegerVectors>; +} // Predicates = [HasStdExtZvkned] + +let Predicates = [HasStdExtZvknha] in { + defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ch", "PseudoVSHA2CH", I32I64IntegerVectors>; + defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2cl", "PseudoVSHA2CH", I32I64IntegerVectors>; + defm : VPatBinaryV_VV_NoMask<"int_riscv_vsha2ms", "PseudoVSHA2MS", I32I64IntegerVectors>; +} // Predicates = [HasStdExtZvknha] + +let Predicates = [HasStdExtZvksed] in { + defm : VPatBinaryV_VI_NoMaskTA<"int_riscv_vsm4k", "PseudoVSM4K", I32IntegerVectors>; + defm : VPatUnaryV_V_S_NoMask_Zvk<"int_riscv_vsm4r", "PseudoVSM4R", I32IntegerVectors>; +} // Predicates = [HasStdExtZvksed] + +let Predicates = [HasStdExtZvksh] in { + defm : VPatBinaryV_VI_NoMask<"int_riscv_vsm3c", "PseudoVSM3C", I32IntegerVectors>; + defm : VPatBinaryV_VV_NoMaskTA<"int_riscv_vsm3me", "PseudoVSM3ME", I32IntegerVectors>; +} // Predicates = [HasStdExtZvksh] + // Include the non-intrinsic ISel patterns include "RISCVInstrInfoVVLPatterns.td" include "RISCVInstrInfoVSDPatterns.td" diff --git 
a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td @@ -27,8 +27,8 @@ def VCIX_XVV : VCIXType<0b1010>; def VCIX_XVW : VCIXType<0b1111>; -// The payload and timm5 operands are all marked as ImmArg in the IR -// intrinsic and will be target constant, so use TImmLeaf rather than ImmLeaf. +// The payload operands are all marked as ImmArg in the IR intrinsic and will +// be target constant, so use TImmLeaf rather than ImmLeaf. def payload1 : Operand, TImmLeaf(Imm);}]> { let ParserMatchClass = UImmAsmOperand<1>; let DecoderMethod = "decodeUImmOperand<1>"; @@ -50,18 +50,6 @@ let OperandNamespace = "RISCVOp"; } -def timm5 : Operand, TImmLeaf(Imm);}]> { - let ParserMatchClass = SImmAsmOperand<5>; - let EncoderMethod = "getImmOpValue"; - let DecoderMethod = "decodeSImmOperand<5>"; - let MCOperandPredicate = [{ - int64_t Imm; - if (MCOp.evaluateAsConstantImm(Imm)) - return isInt<5>(Imm); - return MCOp.isBareSymbolRef(); - }]; -} - class SwapVCIXIns { dag Ins = !con(funct6, !if(swap, rs2, rd), !if(swap, rd, rs2), rs1); } diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesdf.ll b/llvm/test/CodeGen/RISCV/rvv/vaesdf.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vaesdf.ll @@ -0,0 +1,205 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvkned \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvkned \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vaesdf.vv.nxv1i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vv_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdf_vv_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaesdf.vv v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vv.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdf.vv.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vv_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdf_vv_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaesdf.vv v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vv.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdf.vv.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vv_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdf_vv_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaesdf.vv v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vv.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdf.vv.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vv_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdf_vv_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaesdf.vv v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vv.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdf.vv.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vv_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdf_vv_nxv16i32: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaesdf.vv v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vv.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdf.vs.nxv1i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vs_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdf_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaesdf.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vs.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdf.vs.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vs_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdf_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaesdf.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vs.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdf.vs.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vs_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdf_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaesdf.vs v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vs.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdf.vs.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vs_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdf_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaesdf.vs v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vs.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdf.vs.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdf_vs_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdf_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaesdf.vs v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdf.vs.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesdm.ll b/llvm/test/CodeGen/RISCV/rvv/vaesdm.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vaesdm.ll @@ -0,0 +1,205 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvkned \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvkned \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vaesdm.vv.nxv1i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vv_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vv_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaesdm.vv v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vv.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdm.vv.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vv_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vv_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaesdm.vv v8, v9 +; 
CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vv.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdm.vv.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vv_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vv_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaesdm.vv v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vv.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdm.vv.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vv_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vv_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaesdm.vv v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vv.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdm.vv.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vv_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vv_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaesdm.vv v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vv.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdm.vs.nxv1i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vs_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaesdm.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vs.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdm.vs.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vs_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaesdm.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vs.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdm.vs.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vs_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaesdm.vs v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vs.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdm.vs.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vs_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaesdm.vs v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vs.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesdm.vs.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesdm_vs_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesdm_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaesdm.vs v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesdm.vs.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesef.ll b/llvm/test/CodeGen/RISCV/rvv/vaesef.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vaesef.ll 
@@ -0,0 +1,205 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvkned \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvkned \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vaesef.vv.nxv1i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vv_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vv_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaesef.vv v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vv.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesef.vv.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vv_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vv_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaesef.vv v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vv.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesef.vv.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vv_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vv_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaesef.vv v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vv.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesef.vv.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vv_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vv_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaesef.vv v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vv.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesef.vv.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vv_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vv_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaesef.vv v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vv.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesef.vs.nxv1i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vs_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaesef.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vs.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesef.vs.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vs_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaesef.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vs.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesef.vs.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vs_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaesef.vs v8, 
v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vs.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesef.vs.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vs_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaesef.vs v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vs.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesef.vs.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesef_vs_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesef_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaesef.vs v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesef.vs.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesem.ll b/llvm/test/CodeGen/RISCV/rvv/vaesem.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vaesem.ll @@ -0,0 +1,205 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvkned \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvkned \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vaesem.vv.nxv1i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vv_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesem_vv_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaesem.vv v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vv.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesem.vv.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vv_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesem_vv_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaesem.vv v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vv.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesem.vv.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vv_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesem_vv_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaesem.vv v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vv.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesem.vv.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vv_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesem_vv_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaesem.vv v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vv.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesem.vv.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vv_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesem_vv_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaesem.vv v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vv.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret 
%a +} + +declare @llvm.riscv.vaesem.vs.nxv1i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vs_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesem_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaesem.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vs.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesem.vs.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vs_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesem_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaesem.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vs.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesem.vs.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vs_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesem_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaesem.vs v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vs.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesem.vs.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vs_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesem_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaesem.vs v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vs.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesem.vs.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesem_vs_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesem_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaesem.vs v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesem.vs.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vaeskf1.ll b/llvm/test/CodeGen/RISCV/rvv/vaeskf1.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vaeskf1.ll @@ -0,0 +1,115 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvkned \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvkned \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vaeskf1.nxv1i32.i32( + , + , + iXLen, + iXLen) + +define @intrinsic_vaeskf1_vi_nxv1i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vaeskf1_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vaeskf1.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf1.nxv1i32.i32( + undef, + %0, + iXLen 2, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vaeskf1.nxv2i32.i32( + , + , + iXLen, + iXLen) + +define @intrinsic_vaeskf1_vi_nxv2i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vaeskf1_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vaeskf1.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf1.nxv2i32.i32( + undef, + %0, + iXLen 2, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vaeskf1.nxv4i32.i32( + , + , + iXLen, + iXLen) + 
+define @intrinsic_vaeskf1_vi_nxv4i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vaeskf1_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vaeskf1.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf1.nxv4i32.i32( + undef, + %0, + iXLen 2, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vaeskf1.nxv8i32.i32( + , + , + iXLen, + iXLen) + +define @intrinsic_vaeskf1_vi_nxv8i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vaeskf1_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vaeskf1.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf1.nxv8i32.i32( + undef, + %0, + iXLen 2, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vaeskf1.nxv16i32.i32( + , + , + iXLen, + iXLen) + +define @intrinsic_vaeskf1_vi_nxv16i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vaeskf1_vi_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vaeskf1.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf1.nxv16i32.i32( + undef, + %0, + iXLen 2, + iXLen %1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vaeskf2.ll b/llvm/test/CodeGen/RISCV/rvv/vaeskf2.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vaeskf2.ll @@ -0,0 +1,125 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvkned \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvkned \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vaeskf2.nxv1i32.i32( + , + , + iXLen, + iXLen, + iXLen) + +define @intrinsic_vaeskf2_vi_nxv1i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaeskf2_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaeskf2.vi v8, v9, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf2.nxv1i32.i32( + %0, + %1, + iXLen 2, + iXLen %2, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaeskf2.nxv2i32.i32( + , + , + iXLen, + iXLen, + iXLen) + +define @intrinsic_vaeskf2_vi_nxv2i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaeskf2_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaeskf2.vi v8, v9, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf2.nxv2i32.i32( + %0, + %1, + iXLen 2, + iXLen %2, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaeskf2.nxv4i32.i32( + , + , + iXLen, + iXLen, + iXLen) + +define @intrinsic_vaeskf2_vi_nxv4i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaeskf2_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaeskf2.vi v8, v10, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf2.nxv4i32.i32( + %0, + %1, + iXLen 2, + iXLen %2, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaeskf2.nxv8i32.i32( + , + , + iXLen, + iXLen, + iXLen) + +define @intrinsic_vaeskf2_vi_nxv8i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaeskf2_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaeskf2.vi v8, v12, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf2.nxv8i32.i32( + %0, + %1, + iXLen 2, + 
iXLen %2, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaeskf2.nxv16i32.i32( + , + , + iXLen, + iXLen, + iXLen) + +define @intrinsic_vaeskf2_vi_nxv16i32_i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaeskf2_vi_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaeskf2.vi v8, v16, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaeskf2.nxv16i32.i32( + %0, + %1, + iXLen 2, + iXLen %2, + iXLen 2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vaesz.ll b/llvm/test/CodeGen/RISCV/rvv/vaesz.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vaesz.ll @@ -0,0 +1,105 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvkned \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvkned \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vaesz.vs.nxv1i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesz_vs_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesz_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vaesz.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesz.vs.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesz.vs.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesz_vs_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesz_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vaesz.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesz.vs.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesz.vs.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesz_vs_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesz_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vaesz.vs v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesz.vs.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesz.vs.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesz_vs_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesz_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vaesz.vs v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesz.vs.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vaesz.vs.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vaesz_vs_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vaesz_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vaesz.vs v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vaesz.vs.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn.ll b/llvm/test/CodeGen/RISCV/rvv/vandn.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vandn.ll @@ -0,0 +1,2173 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc 
-mtriple=riscv64 -mattr=+v,+experimental-zvbb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 + +declare @llvm.riscv.vandn.nxv1i8.nxv1i8( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i8.nxv1i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv2i8.nxv2i8( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i8.nxv2i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv2i8.nxv2i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv4i8.nxv4i8( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i8.nxv4i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv4i8.nxv4i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv8i8.nxv8i8( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv8i8.nxv8i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv8i8.nxv8i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; 
CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv16i8.nxv16i8( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv16i8.nxv16i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv16i8.nxv16i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vandn.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv32i8.nxv32i8( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv32i8.nxv32i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv32i8.nxv32i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vandn.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv64i8.nxv64i8( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv64i8.nxv64i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv64i8.nxv64i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8r.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vandn.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv1i16.nxv1i16( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i16.nxv1i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv1i16.nxv1i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) 
nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv2i16.nxv2i16( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i16.nxv2i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv2i16.nxv2i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv4i16.nxv4i16( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i16.nxv4i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv4i16.nxv4i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv8i16.nxv8i16( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv8i16.nxv8i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv8i16.nxv8i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vandn.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv16i16.nxv16i16( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv16i16.nxv16i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare 
@llvm.riscv.vandn.mask.nxv16i16.nxv16i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vandn.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv32i16.nxv32i16( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv32i16.nxv32i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv32i16.nxv32i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vandn.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv1i32.nxv1i32( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i32.nxv1i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv1i32.nxv1i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv2i32.nxv2i32( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i32.nxv2i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv2i32.nxv2i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv4i32.nxv4i32( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, 
ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i32.nxv4i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv4i32.nxv4i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vandn.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv8i32.nxv8i32( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv8i32.nxv8i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv8i32.nxv8i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vandn.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv16i32.nxv16i32( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv16i32.nxv16i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv16i32.nxv16i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vandn.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv1i64.nxv1i64( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i64.nxv1i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv1i64.nxv1i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vandn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv2i64.nxv2i64( + , + , + , + iXLen) + +define 
@intrinsic_vandn_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i64.nxv2i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv2i64.nxv2i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vandn.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv4i64.nxv4i64( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i64.nxv4i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv4i64.nxv4i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vandn.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv8i64.nxv8i64( + , + , + , + iXLen) + +define @intrinsic_vandn_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vandn.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv8i64.nxv8i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv8i64.nxv8i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vandn.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv1i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vandn_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv1i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vandn.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i8.i8( + %0, + %1, + i8 %2, 
+ %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv2i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vandn_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv2i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vandn.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv4i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vandn_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv4i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vandn.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv8i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vandn_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv8i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv8i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vandn.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv16i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vandn_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv16i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv16i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vandn.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, 
iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv32i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vandn_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv32i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv32i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vandn.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv64i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vandn_vx_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv64i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv64i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vandn.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv1i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vandn_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv1i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vandn.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv2i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vandn_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv2i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vandn.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i16.i16( 
+ %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv4i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vandn_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv4i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vandn.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv8i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vandn_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv8i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv8i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vandn.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv16i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vandn_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv16i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv16i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vandn.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv32i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vandn_vx_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv32i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv32i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vandn.vx v8, v16, 
a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv1i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vandn_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv1i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vandn.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv2i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vandn_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv2i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vandn.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv4i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vandn_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv4i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vandn.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv8i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vandn_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv8i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv8i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vandn.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv16i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vandn_vx_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vandn_vx_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vandn.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv16i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv16i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vandn_mask_vx_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vandn.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv1i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vandn_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vandn_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vandn.vv v8, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vandn_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vandn.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv1i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv1i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vandn_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vandn.vv v8, v9, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vandn_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vandn.vx v8, v9, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv2i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vandn_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vandn_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vandn.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vandn_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vandn.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv2i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + 
+declare @llvm.riscv.vandn.mask.nxv2i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vandn_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vandn.vv v8, v10, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vandn_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vandn.vx v8, v10, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv4i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vandn_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vandn_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vandn.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vandn_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vandn.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv4i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv4i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vandn_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vandn.vv v8, v12, v16, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vandn_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vandn.vx v8, v12, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vandn.nxv8i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vandn_vx_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vandn_vx_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vandn.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vandn_vx_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vandn.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vandn.nxv8i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vandn.mask.nxv8i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vandn_mask_vx_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vandn_mask_vx_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; 
RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT:    vlse64.v v24, (a0), zero
+; RV32-NEXT:    vandn.vv v8, v16, v24, v0.t
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vandn_mask_vx_nxv8i64_i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT:    vandn.vx v8, v16, a0, v0.t
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vbrev.ll b/llvm/test/CodeGen/RISCV/rvv/vbrev.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vbrev.ll
@@ -0,0 +1,951 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i8> @llvm.riscv.vbrev.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vbrev_vs_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vbrev.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vbrev.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vbrev_mask_vs_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vbrev.mask.nxv1i8(
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vbrev.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vbrev_vs_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vbrev.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vbrev.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vbrev_mask_vs_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vbrev.mask.nxv2i8(
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vbrev.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vbrev_vs_nxv4i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vbrev.nxv4i8(
+    <vscale x 4 x i8> undef,
+    <vscale x 4 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vbrev.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vbrev_mask_vs_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vbrev.mask.nxv4i8(
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i8> %a
+} + +declare @llvm.riscv.vbrev.nxv8i8( + , + , + iXLen); + +define @intrinsic_vbrev_vs_nxv8i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vbrev.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.nxv8i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev.mask.nxv8i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev_mask_vs_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vbrev.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.mask.nxv8i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev.nxv16i8( + , + , + iXLen); + +define @intrinsic_vbrev_vs_nxv16i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev_vs_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vbrev.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.nxv16i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev.mask.nxv16i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vbrev.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.mask.nxv16i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev.nxv32i8( + , + , + iXLen); + +define @intrinsic_vbrev_vs_nxv32i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev_vs_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vbrev.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.nxv32i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev.mask.nxv32i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vbrev.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.mask.nxv32i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev.nxv64i8( + , + , + iXLen); + +define @intrinsic_vbrev_vs_nxv64i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev_vs_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vbrev.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.nxv64i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev.mask.nxv64i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vbrev.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.mask.nxv64i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev.nxv1i16( + , + , + iXLen); + +define @intrinsic_vbrev_vs_nxv1i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vbrev.v v8, v8 +; CHECK-NEXT: ret +entry: + %a 
= call @llvm.riscv.vbrev.nxv1i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev.mask.nxv1i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vbrev.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.mask.nxv1i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev.nxv2i16( + , + , + iXLen); + +define @intrinsic_vbrev_vs_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vbrev.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.nxv2i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev.mask.nxv2i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vbrev.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.mask.nxv2i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev.nxv4i16( + , + , + iXLen); + +define @intrinsic_vbrev_vs_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vbrev.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.nxv4i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev.mask.nxv4i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev_mask_vs_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vbrev.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.mask.nxv4i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev.nxv8i16( + , + , + iXLen); + +define @intrinsic_vbrev_vs_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vbrev.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.nxv8i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev.mask.nxv8i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vbrev.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.mask.nxv8i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev.nxv16i16( + , + , + iXLen); + +define @intrinsic_vbrev_vs_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev_vs_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vbrev.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.nxv16i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev.mask.nxv16i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev_mask_vs_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv16i16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vbrev.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.mask.nxv16i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev.nxv32i16( + , + , + iXLen); + +define @intrinsic_vbrev_vs_nxv32i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev_vs_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vbrev.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.nxv32i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev.mask.nxv32i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vbrev.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.mask.nxv32i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev.nxv1i32( + , + , + iXLen); + +define @intrinsic_vbrev_vs_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vbrev.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev.mask.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vbrev.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.mask.nxv1i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev.nxv2i32( + , + , + iXLen); + +define @intrinsic_vbrev_vs_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vbrev.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev.mask.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vbrev.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.mask.nxv2i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev.nxv4i32( + , + , + iXLen); + +define @intrinsic_vbrev_vs_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vbrev.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev.mask.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vbrev.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.mask.nxv4i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev.nxv8i32( + , + , 
+ iXLen); + +define @intrinsic_vbrev_vs_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vbrev.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev.mask.nxv8i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vbrev.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.mask.nxv8i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev.nxv16i32( + , + , + iXLen); + +define @intrinsic_vbrev_vs_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vbrev.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.nxv16i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev.mask.nxv16i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vbrev.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.mask.nxv16i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev.nxv1i64( + , + , + iXLen); + +define @intrinsic_vbrev_vs_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev_vs_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vbrev.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.nxv1i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev.mask.nxv1i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vbrev.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.mask.nxv1i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev.nxv2i64( + , + , + iXLen); + +define @intrinsic_vbrev_vs_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev_vs_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vbrev.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.nxv2i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev.mask.nxv2i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vbrev.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev.mask.nxv2i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev.nxv4i64( + , + , + iXLen); + +define @intrinsic_vbrev_vs_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev_vs_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vbrev.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call 
<vscale x 4 x i64> @llvm.riscv.vbrev.nxv4i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vbrev.mask.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vbrev_mask_vs_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vbrev.mask.nxv4i64(
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vbrev.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vbrev_vs_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_vs_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vbrev.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vbrev.nxv8i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vbrev.mask.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vbrev_mask_vs_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev_mask_vs_nxv8i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vbrev.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vbrev.mask.nxv8i64(
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vbrev8.ll b/llvm/test/CodeGen/RISCV/rvv/vbrev8.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vbrev8.ll
@@ -0,0 +1,951 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i8> @llvm.riscv.vbrev8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vbrev8_vs_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vbrev8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vbrev8.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vbrev8_mask_vs_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vbrev8.mask.nxv1i8(
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vbrev8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vbrev8_vs_nxv2i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_vs_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vbrev8.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vbrev8.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vbrev8.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vbrev8_mask_vs_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vbrev8.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vbrev8.mask.nxv2i8(
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare
@llvm.riscv.vbrev8.nxv4i8( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv4i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv4i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv4i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv4i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vbrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv4i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv8i8( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv8i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv8i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv8i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vbrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv8i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv16i8( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv16i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv16i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv16i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vbrev8.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv16i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv32i8( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv32i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv32i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv32i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vbrev8.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv32i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv64i8( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv64i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; 
CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv64i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv64i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vbrev8.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv64i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv1i16( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv1i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv1i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv1i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vbrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv1i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv2i16( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv2i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv2i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vbrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv2i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv4i16( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv4i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv4i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vbrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv4i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv8i16( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv8i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv8i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) 
nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vbrev8.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv8i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv16i16( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv16i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv16i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vbrev8.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv16i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv32i16( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv32i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv32i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv32i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vbrev8.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv32i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv1i32( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vbrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv1i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv2i32( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vbrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vbrev8.mask.nxv2i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv4i32( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vbrev8.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv4i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv8i32( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv8i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vbrev8.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv8i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv16i32( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv16i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv16i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vbrev8.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv16i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv1i64( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv1i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv1i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vbrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv1i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv2i64( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: 
intrinsic_vbrev8_vs_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv2i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv2i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vbrev8.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv2i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv4i64( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv4i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv4i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vbrev8.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv4i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vbrev8.nxv8i64( + , + , + iXLen); + +define @intrinsic_vbrev8_vs_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_vs_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vbrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.nxv8i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vbrev8.mask.nxv8i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vbrev8_mask_vs_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vbrev8_mask_vs_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vbrev8.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vbrev8.mask.nxv8i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vclmul.ll b/llvm/test/CodeGen/RISCV/rvv/vclmul.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vclmul.ll @@ -0,0 +1,478 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbc \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbc \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 + +declare @llvm.riscv.vclmul.nxv1i64.nxv1i64( + , + , + , + iXLen) + +define @intrinsic_vclmul_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vclmul_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vclmul.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.nxv1i64.nxv1i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vclmul_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vclmul_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vclmul.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmul.nxv2i64.nxv2i64( + , + , + , + iXLen) + +define @intrinsic_vclmul_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vclmul_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vclmul.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.nxv2i64.nxv2i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vclmul_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vclmul.vv v8, v10, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmul.nxv4i64.nxv4i64( + , + , + , + iXLen) + +define @intrinsic_vclmul_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vclmul_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vclmul.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.nxv4i64.nxv4i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vclmul_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vclmul.vv v8, v12, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmul.nxv8i64.nxv8i64( + , + , + , + iXLen) + +define @intrinsic_vclmul_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vclmul_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vclmul.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.nxv8i64.nxv8i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vclmul_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vclmul.vv v8, v16, v24 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmul.nxv1i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vclmul_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vclmul_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vclmul.vv v8, v8, v9 +; 
RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmul_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vclmul.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.nxv1i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmul.mask.nxv1i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vclmul_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vclmul.vv v8, v9, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vclmul.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmul.nxv2i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vclmul_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vclmul_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vclmul.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmul_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vclmul.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.nxv2i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmul.mask.nxv2i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vclmul_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vclmul.vv v8, v10, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vclmul.vx v8, v10, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmul.nxv4i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vclmul_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vclmul_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vclmul.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmul_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vclmul.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.nxv4i64.i64( + undef, + 
%0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmul.mask.nxv4i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vclmul_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vclmul.vv v8, v12, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vclmul.vx v8, v12, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmul.nxv8i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vclmul_vx_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vclmul_vx_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vclmul.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmul_vx_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vclmul.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.nxv8i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmul.mask.nxv8i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vclmul_mask_vx_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vclmul.vv v8, v16, v24 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vclmul.vx v8, v16, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmul.mask.nxv8i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vclmulh.ll b/llvm/test/CodeGen/RISCV/rvv/vclmulh.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vclmulh.ll @@ -0,0 +1,478 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbc \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbc \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 + +declare @llvm.riscv.vclmulh.nxv1i64.nxv1i64( + , + , + , + iXLen) + +define @intrinsic_vclmulh_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vclmulh_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vclmulh.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.nxv1i64.nxv1i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + 
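For reference, a minimal illustrative sketch (not taken verbatim from the patch; the scalable vector types are inferred from the mangled intrinsic name and from the operand layout these tests use: passthru, vector, vector/scalar, vl for the unmasked form and merge, vector, vector/scalar, mask, vl, policy for the masked form) of how the first vclmulh declaration pair reads with the types written out. iXLen is rewritten to i32/i64 by the sed commands in the RUN lines:

declare <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,   ; passthru (undef in the unmasked tests)
  <vscale x 1 x i64>,   ; vector operand
  <vscale x 1 x i64>,   ; vector operand
  iXLen)                ; vl

declare <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,   ; merge
  <vscale x 1 x i64>,   ; vector operand
  <vscale x 1 x i64>,   ; vector operand
  <vscale x 1 x i1>,    ; mask
  iXLen,                ; vl
  iXLen)                ; policy (1 in these tests: tail agnostic, mask undisturbed)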
+declare @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vclmulh_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vclmulh.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmulh.nxv2i64.nxv2i64( + , + , + , + iXLen) + +define @intrinsic_vclmulh_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vclmulh_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vclmulh.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.nxv2i64.nxv2i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vclmulh_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vclmulh.vv v8, v10, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmulh.nxv4i64.nxv4i64( + , + , + , + iXLen) + +define @intrinsic_vclmulh_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vclmulh_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vclmulh.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.nxv4i64.nxv4i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vclmulh_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vclmulh.vv v8, v12, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmulh.nxv8i64.nxv8i64( + , + , + , + iXLen) + +define @intrinsic_vclmulh_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vclmulh_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vclmulh.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.nxv8i64.nxv8i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vclmulh_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vclmulh_mask_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vclmulh.vv v8, v16, v24 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmulh.nxv1i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vclmulh_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vclmulh_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; 
RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vclmulh.vv v8, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmulh_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vclmulh.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.nxv1i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmulh.mask.nxv1i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vclmulh_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vclmulh.vv v8, v9, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vclmulh.vx v8, v9, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmulh.nxv2i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vclmulh_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vclmulh_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vclmulh.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmulh_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vclmulh.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.nxv2i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmulh.mask.nxv2i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vclmulh_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vclmulh.vv v8, v10, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vclmulh.vx v8, v10, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmulh.nxv4i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vclmulh_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vclmulh_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vclmulh.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 
16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmulh_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vclmulh.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.nxv4i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmulh.mask.nxv4i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vclmulh_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vclmulh.vv v8, v12, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vclmulh.vx v8, v12, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclmulh.nxv8i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vclmulh_vx_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vclmulh_vx_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vclmulh.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmulh_vx_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vclmulh.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.nxv8i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vclmulh.mask.nxv8i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vclmulh_mask_vx_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vclmulh_mask_vx_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vclmulh.vv v8, v16, v24 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vclmulh_mask_vx_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vclmulh.vx v8, v16, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vclmulh.mask.nxv8i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vclz.ll b/llvm/test/CodeGen/RISCV/rvv/vclz.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vclz.ll @@ -0,0 +1,951 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vclz.nxv1i8( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv1i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv1i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv1i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv1i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vclz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv1i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv2i8( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv2i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv2i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv2i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv2i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vclz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv2i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv4i8( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv4i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv4i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv4i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv4i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vclz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv4i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv8i8( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv8i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv8i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv8i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vclz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv8i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv16i8( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv16i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv16i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv16i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vclz_mask_vs_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vclz.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv16i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv32i8( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv32i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv32i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv32i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vclz.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv32i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv64i8( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv64i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv64i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv64i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vclz.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv64i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv1i16( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv1i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv1i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv1i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vclz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv1i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv2i16( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv2i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv2i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vclz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv2i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv4i16( + , + , + iXLen); + +define 
@intrinsic_vclz_vs_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv4i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv4i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vclz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv4i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv8i16( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv8i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv8i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vclz.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv8i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv16i16( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv16i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv16i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vclz.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv16i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv32i16( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv32i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv32i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv32i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vclz.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv32i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv1i32( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + 
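The unary Zvbb intrinsics follow the same pattern with a single source operand. As another illustrative sketch (types again inferred from the mangled intrinsic name rather than copied from the patch), the vclz pair for nxv1i32 reads:

declare <vscale x 1 x i32> @llvm.riscv.vclz.nxv1i32(
  <vscale x 1 x i32>,   ; passthru (undef in the unmasked tests)
  <vscale x 1 x i32>,   ; source vector
  iXLen)                ; vl

declare <vscale x 1 x i32> @llvm.riscv.vclz.mask.nxv1i32(
  <vscale x 1 x i32>,   ; merge
  <vscale x 1 x i32>,   ; source vector
  <vscale x 1 x i1>,    ; mask
  iXLen,                ; vl
  iXLen)                ; policy (1: tail agnostic, mask undisturbed, matching the "ta, mu" vsetvli in the masked CHECK lines)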
+declare @llvm.riscv.vclz.mask.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vclz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv1i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv2i32( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vclz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv2i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv4i32( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vclz.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv4i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv8i32( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv8i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vclz.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv8i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv16i32( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv16i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv16i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vclz.v v8, v16, v0.t +; CHECK-NEXT: 
ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv16i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv1i64( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv1i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv1i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vclz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv1i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv2i64( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv2i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv2i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vclz.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv2i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv4i64( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv4i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv4i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vclz.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv4i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vclz.nxv8i64( + , + , + iXLen); + +define @intrinsic_vclz_vs_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vclz_vs_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vclz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.nxv8i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vclz.mask.nxv8i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vclz_mask_vs_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vclz_mask_vs_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vclz.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vclz.mask.nxv8i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpopv.ll b/llvm/test/CodeGen/RISCV/rvv/vcpopv.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vcpopv.ll @@ -0,0 +1,951 @@ +; NOTE: Assertions have been 
autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vcpopv.nxv1i8( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv1i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv1i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv1i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vcpop.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv1i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv2i8( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv2i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv2i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv2i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv2i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vcpop.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv2i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv4i8( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv4i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv4i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv4i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv4i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vcpop.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv4i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv8i8( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv8i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv8i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv8i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vcpop.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vcpopv.mask.nxv8i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv16i8( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv16i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv16i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv16i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vcpop.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv16i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv32i8( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv32i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv32i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv32i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vcpop.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv32i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv64i8( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv64i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv64i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv64i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vcpop.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv64i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv1i16( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv1i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv1i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv1i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vcpop.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv1i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv2i16( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv2i16: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv2i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv2i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vcpop.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv2i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv4i16( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv4i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv4i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vcpop.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv4i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv8i16( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv8i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv8i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vcpop.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv8i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv16i16( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv16i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv16i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vcpop.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv16i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv32i16( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv32i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv32i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare 
@llvm.riscv.vcpopv.mask.nxv32i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vcpop.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv32i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv1i32( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vcpop.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv1i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv2i32( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vcpop.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv2i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv4i32( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vcpop.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv4i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv8i32( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv8i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, 
e32, m4, ta, mu +; CHECK-NEXT: vcpop.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv8i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv16i32( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv16i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv16i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vcpop.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv16i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv1i64( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv1i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv1i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vcpop.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv1i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv2i64( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv2i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv2i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vcpop.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv2i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv4i64( + , + , + iXLen); + +define @intrinsic_vcpopv_vs_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv4i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv4i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vcpop.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv4i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vcpopv.nxv8i64( + , + , + iXLen); + +define 
@intrinsic_vcpopv_vs_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_vs_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vcpop.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.nxv8i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vcpopv.mask.nxv8i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vcpopv_mask_vs_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vcpopv_mask_vs_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vcpop.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vcpopv.mask.nxv8i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vctz.ll b/llvm/test/CodeGen/RISCV/rvv/vctz.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vctz.ll @@ -0,0 +1,951 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vctz.nxv1i8( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv1i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv1i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv1i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vctz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv1i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv2i8( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv2i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv2i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv2i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv2i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vctz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv2i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv4i8( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv4i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv4i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv4i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv4i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, 
mu +; CHECK-NEXT: vctz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv4i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv8i8( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv8i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv8i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv8i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vctz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv8i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv16i8( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv16i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv16i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv16i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vctz.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv16i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv32i8( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv32i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv32i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv32i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vctz.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv32i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv64i8( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv64i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv64i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv64i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vctz.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv64i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv1i16( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv1i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv1i16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv1i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv1i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vctz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv1i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv2i16( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv2i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv2i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vctz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv2i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv4i16( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv4i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv4i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vctz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv4i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv8i16( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv8i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv8i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vctz.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv8i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv16i16( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv16i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv16i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv16i16( %0, %1, %2, 
iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vctz.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv16i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv32i16( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv32i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv32i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv32i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vctz.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv32i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv1i32( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vctz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv1i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv2i32( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vctz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv2i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv4i32( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vctz.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv4i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.vctz.nxv8i32( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv8i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vctz.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv8i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv16i32( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv16i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv16i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv16i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vctz.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv16i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv1i64( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv1i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv1i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vctz.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv1i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv2i64( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv2i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv2i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vctz.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv2i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv4i64( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv4i64( + 
undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv4i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vctz.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv4i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vctz.nxv8i64( + , + , + iXLen); + +define @intrinsic_vctz_vs_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vctz_vs_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vctz.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.nxv8i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vctz.mask.nxv8i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vctz_mask_vs_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vctz_mask_vs_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vctz.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vctz.mask.nxv8i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vghsh.ll b/llvm/test/CodeGen/RISCV/rvv/vghsh.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vghsh.ll @@ -0,0 +1,126 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvkg \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvkg \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vghsh.nxv1i32.nxv1i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vghsh_vv_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vghsh_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vghsh.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vghsh.nxv1i32.nxv1i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vghsh.nxv2i32.nxv2i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vghsh_vv_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vghsh_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vghsh.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vghsh.nxv2i32.nxv2i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vghsh.nxv4i32.nxv4i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vghsh_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vghsh_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vghsh.vv v8, v10, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vghsh.nxv4i32.nxv4i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vghsh.nxv8i32.nxv8i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vghsh_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vghsh_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vghsh.vv v8, v12, v16 +; CHECK-NEXT: ret +entry: + %a 
= call <vscale x 8 x i32> @llvm.riscv.vghsh.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vghsh.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vghsh_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vghsh_vv_nxv16i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    vghsh.vv v8, v16, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vghsh.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i32> %2,
+    iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vgmul.ll b/llvm/test/CodeGen/RISCV/rvv/vgmul.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vgmul.ll
@@ -0,0 +1,105 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvkg \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvkg \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i32> @llvm.riscv.vgmul.vv.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vgmul_vs_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vgmul_vs_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vgmul.vv v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vgmul.vv.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vgmul.vv.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vgmul_vs_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vgmul_vs_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vgmul.vv v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vgmul.vv.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vgmul.vv.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vgmul_vs_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vgmul_vs_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vgmul.vv v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vgmul.vv.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vgmul.vv.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vgmul_vs_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vgmul_vs_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vgmul.vv v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vgmul.vv.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vgmul.vv.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen, iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vgmul_vs_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vgmul_vs_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vgmul.vv v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vgmul.vv.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2, iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrev8.ll b/llvm/test/CodeGen/RISCV/rvv/vrev8.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vrev8.ll
@@ -0,0 +1,951 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \
+; RUN:   
-verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vrev8.nxv1i8( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv1i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv1i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv1i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv1i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv2i8( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv2i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv2i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv2i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv2i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv2i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv4i8( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv4i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv4i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv4i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv4i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv4i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv8i8( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv8i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv8i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv8i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv8i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv16i8( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv16i8( %0, iXLen %1) 
nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv16i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv16i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vrev8.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv16i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv32i8( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv32i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv32i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv32i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vrev8.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv32i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv64i8( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv64i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv64i8( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv64i8( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv64i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vrev8.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv64i8( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv1i16( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv1i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv1i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv1i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv1i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv1i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv2i16( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv2i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv2i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare 
@llvm.riscv.vrev8.mask.nxv2i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv2i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv2i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv4i16( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv4i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv4i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv4i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv4i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv4i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv8i16( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv8i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv8i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv8i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv8i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vrev8.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv8i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv16i16( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv16i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv16i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv16i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv16i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vrev8.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv16i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv32i16( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv32i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv32i16( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv32i16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv32i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; 
CHECK-NEXT: vrev8.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv32i16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv1i32( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv1i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv1i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv1i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv2i32( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv2i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv2i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv2i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv4i32( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv4i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv4i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vrev8.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv4i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv8i32( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv8i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv8i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vrev8.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv8i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv16i32( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv16i32( %0, iXLen %1) nounwind { +; 
CHECK-LABEL: intrinsic_vrev8_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv16i32( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv16i32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vrev8.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv16i32( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv1i64( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv1i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv1i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv1i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vrev8.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv1i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv2i64( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv2i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv2i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv2i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vrev8.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv2i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv4i64( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv4i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv4i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vrev8.mask.nxv4i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vrev8.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv4i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrev8.nxv8i64( + , + , + iXLen); + +define @intrinsic_vrev8_vs_nxv8i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vrev8_vs_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vrev8.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.nxv8i64( + undef, + %0, + iXLen %1) + + ret %a +} + +declare 
@llvm.riscv.vrev8.mask.nxv8i64( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vrev8_mask_vs_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vrev8_mask_vs_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vrev8.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrev8.mask.nxv8i64( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vrol.ll b/llvm/test/CodeGen/RISCV/rvv/vrol.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vrol.ll @@ -0,0 +1,2173 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64 + +declare @llvm.riscv.vrol.nxv1i8.nxv1i8( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i8.nxv1i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv2i8.nxv2i8( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i8.nxv2i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv2i8.nxv2i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv4i8.nxv4i8( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i8.nxv4i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv4i8.nxv4i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: 
ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv8i8.nxv8i8( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv8i8.nxv8i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv8i8.nxv8i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv16i8.nxv16i8( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv16i8.nxv16i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv16i8.nxv16i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vrol.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv32i8.nxv32i8( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv32i8.nxv32i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv32i8.nxv32i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vrol.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv64i8.nxv64i8( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv64i8.nxv64i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv64i8.nxv64i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8r.v v24, (a0) +; CHECK-NEXT: vsetvli 
zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vrol.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv1i16.nxv1i16( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i16.nxv1i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv1i16.nxv1i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv2i16.nxv2i16( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i16.nxv2i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv2i16.nxv2i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv4i16.nxv4i16( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i16.nxv4i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv4i16.nxv4i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv8i16.nxv8i16( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv8i16.nxv8i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv8i16.nxv8i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vrol_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vrol.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv16i16.nxv16i16( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv16i16.nxv16i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv16i16.nxv16i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vrol.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv32i16.nxv32i16( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv32i16.nxv32i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv32i16.nxv32i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vrol.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv1i32.nxv1i32( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i32.nxv1i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv1i32.nxv1i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv2i32.nxv2i32( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i32.nxv2i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare 
@llvm.riscv.vrol.mask.nxv2i32.nxv2i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv4i32.nxv4i32( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i32.nxv4i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv4i32.nxv4i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vrol.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv8i32.nxv8i32( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv8i32.nxv8i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv8i32.nxv8i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vrol.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv16i32.nxv16i32( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv16i32.nxv16i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv16i32.nxv16i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vrol.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv1i64.nxv1i64( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v9 +; 
CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i64.nxv1i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv1i64.nxv1i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vrol.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv2i64.nxv2i64( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i64.nxv2i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv2i64.nxv2i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vrol.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv4i64.nxv4i64( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i64.nxv4i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv4i64.nxv4i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vrol.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv8i64.nxv8i64( + , + , + , + iXLen) + +define @intrinsic_vrol_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vrol.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv8i64.nxv8i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv8i64.nxv8i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vrol.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv1i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vrol_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv1i8_i8: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv1i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vrol.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv2i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vrol_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv2i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vrol.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv4i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vrol_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv4i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vrol.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv8i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vrol_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv8i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv8i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vrol.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv16i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vrol_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, 
ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv16i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv16i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vrol.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv32i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vrol_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv32i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv32i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vrol.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv64i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vrol_vx_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv64i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv64i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vrol.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv1i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vrol_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv1i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vrol.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv2i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vrol_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; 
CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv2i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vrol.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv4i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vrol_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv4i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vrol.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv8i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vrol_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv8i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv8i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vrol.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv16i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vrol_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv16i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv16i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vrol.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv32i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vrol_vx_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv32i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv32i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vrol.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv1i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vrol_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv1i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vrol.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv2i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vrol_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv2i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vrol.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv4i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vrol_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv4i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vrol.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv8i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vrol_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vrol_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv8i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv8i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vrol.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv16i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vrol_vx_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vrol_vx_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vrol.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv16i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv16i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vrol_mask_vx_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vrol.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv1i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vrol_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vrol_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vrol.vv v8, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrol_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vrol.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv1i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv1i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vrol_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vrol.vv v8, v9, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrol_mask_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vrol.vx v8, v9, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv2i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vrol_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vrol_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 
8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vrol.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrol_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vrol.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv2i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv2i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vrol_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vrol.vv v8, v10, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrol_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vrol.vx v8, v10, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv4i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vrol_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vrol_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vrol.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrol_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vrol.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.nxv4i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vrol.mask.nxv4i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vrol_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vrol_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vrol.vv v8, v12, v16, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrol_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vrol.vx v8, v12, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vrol.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vrol.nxv8i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vrol_vx_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vrol_vx_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vrol.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vrol_vx_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, 
m8, ta, ma
+; RV64-NEXT: vrol.vx v8, v8, a0
+; RV64-NEXT: ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrol.nxv8i64.i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vrol_mask_vx_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vrol_mask_vx_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v24, (a0), zero
+; RV32-NEXT: vrol.vv v8, v16, v24, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vrol_mask_vx_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vrol.vx v8, v16, a0, v0.t
+; RV64-NEXT: ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vror.ll b/llvm/test/CodeGen/RISCV/rvv/vror.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vror.ll
@@ -0,0 +1,2899 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+declare <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen)
+
+define <vscale x 1 x i8> @intrinsic_vror_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vror.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i8> @intrinsic_vror_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vror.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  iXLen)
+
+define <vscale x 2 x i8> @intrinsic_vror_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vror.vv v8, v8, v9
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> undef,
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i8> @intrinsic_vror_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vror.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  iXLen)
+
+define <vscale x 4 x i8> @intrinsic_vror_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vror_vv_nxv4i8_nxv4i8:
+;
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i8.nxv4i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv4i8.nxv4i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vror.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv8i8.nxv8i8( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i8.nxv8i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv8i8.nxv8i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vror.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv16i8.nxv16i8( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv16i8.nxv16i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv16i8.nxv16i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vror.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv32i8.nxv32i8( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv32i8.nxv32i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv32i8.nxv32i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vror.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv64i8.nxv64i8( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv64i8_nxv64i8( %0, %1, iXLen %2) nounwind { +; 
CHECK-LABEL: intrinsic_vror_vv_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv64i8.nxv64i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv64i8.nxv64i8( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv64i8_nxv64i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv64i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8r.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vror.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv64i8.nxv64i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv1i16.nxv1i16( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i16.nxv1i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv1i16.nxv1i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vror.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv2i16.nxv2i16( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i16.nxv2i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv2i16.nxv2i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vror.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv4i16.nxv4i16( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i16.nxv4i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv4i16.nxv4i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vror.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.vror.nxv8i16.nxv8i16( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i16.nxv8i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv8i16.nxv8i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vror.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv16i16.nxv16i16( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv16i16.nxv16i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv16i16.nxv16i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vror.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv32i16.nxv32i16( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv32i16_nxv32i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv32i16.nxv32i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv32i16.nxv32i16( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv32i16_nxv32i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv32i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vror.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv32i16.nxv32i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv1i32.nxv1i32( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i32.nxv1i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv1i32.nxv1i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vror.vv v8, v9, v10, v0.t +; 
CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv2i32.nxv2i32( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i32.nxv2i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv2i32.nxv2i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vror.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv4i32.nxv4i32( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i32.nxv4i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv4i32.nxv4i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vror.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv8i32.nxv8i32( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i32.nxv8i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv8i32.nxv8i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vror.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv16i32.nxv16i32( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv16i32_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv16i32.nxv16i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv16i32.nxv16i32( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv16i32_nxv16i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vror.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv16i32.nxv16i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv1i64.nxv1i64( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv1i64_nxv1i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i64.nxv1i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv1i64.nxv1i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vror.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i64.nxv1i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv2i64.nxv2i64( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv2i64_nxv2i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i64.nxv2i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv2i64.nxv2i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vror.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i64.nxv2i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv4i64.nxv4i64( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv4i64_nxv4i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i64.nxv4i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv4i64.nxv4i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vror.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i64.nxv4i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv8i64.nxv8i64( + , + , + , + iXLen) + +define @intrinsic_vror_vv_nxv8i64_nxv8i64( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vror.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i64.nxv8i64( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv8i64.nxv8i64( + , + , + , + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vv_nxv8i64_nxv8i64( %0, %1, %2, 
%3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vror.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i64.nxv8i64( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv1i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vror_vx_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv1i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vror.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv2i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vror_vx_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv2i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vror.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv4i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vror_vx_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv4i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vror.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv8i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vror_vx_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv8i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) 
nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vror.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv16i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vror_vx_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv16i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv16i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vror.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv32i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vror_vx_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv32i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv32i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vror.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv64i8.i8( + , + , + i8, + iXLen) + +define @intrinsic_vror_vx_nxv64i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv64i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv64i8.i8( + , + , + i8, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv64i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu +; CHECK-NEXT: vror.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv1i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vror_vx_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv1i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; 
CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vror.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv2i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vror_vx_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv2i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vror.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv4i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vror_vx_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv4i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vror.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv8i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vror_vx_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv8i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vror.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv16i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vror_vx_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv16i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv16i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv16i16_i16( %0, %1, 
i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vror.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv32i16.i16( + , + , + i16, + iXLen) + +define @intrinsic_vror_vx_nxv32i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv32i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv32i16.i16( + , + , + i16, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv32i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vror.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv1i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vror_vx_nxv1i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv1i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vror.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv2i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vror_vx_nxv2i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv2i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vror.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv4i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vror_vx_nxv4i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv4i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define 
@intrinsic_vror_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vror.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv8i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vror_vx_nxv8i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv8i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vror.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv16i32.i32( + , + , + i32, + iXLen) + +define @intrinsic_vror_vx_nxv16i32_i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vror_vx_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma +; CHECK-NEXT: vror.vx v8, v8, a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv16i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv16i32.i32( + , + , + i32, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv16i32_i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vx_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vror.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv1i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vror_vx_nxv1i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vror_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vror.vv v8, v8, v9 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vror_vx_nxv1i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vror.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv1i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vror_mask_vx_nxv1i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vror.vv v8, v9, v10, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vror_mask_vx_nxv1i64_i64: +; 
RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu +; RV64-NEXT: vror.vx v8, v9, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv2i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vror_vx_nxv2i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vror_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a0), zero +; RV32-NEXT: vror.vv v8, v8, v10 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vror_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma +; RV64-NEXT: vror.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv2i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vror_mask_vx_nxv2i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vror.vv v8, v10, v12, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vror_mask_vx_nxv2i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu +; RV64-NEXT: vror.vx v8, v10, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv4i64.i64( + , + , + i64, + iXLen) + +define @intrinsic_vror_vx_nxv4i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vror_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a0), zero +; RV32-NEXT: vror.vv v8, v8, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vror_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vror.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv4i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vror_mask_vx_nxv4i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vror.vv v8, v12, v16, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vror_mask_vx_nxv4i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu +; RV64-NEXT: vror.vx v8, v12, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vror.nxv8i64.i64( + , + , + i64, + iXLen) + 
+define @intrinsic_vror_vx_nxv8i64_i64( %0, i64 %1, iXLen %2) nounwind { +; RV32-LABEL: intrinsic_vror_vx_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vror.vv v8, v8, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vror_vx_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vror.vx v8, v8, a0 +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i64.i64( + undef, + %0, + i64 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vror.mask.nxv8i64.i64( + , + , + i64, + , + iXLen, + iXLen) + +define @intrinsic_vror_mask_vx_nxv8i64_i64( %0, %1, i64 %2, %3, iXLen %4) nounwind { +; RV32-LABEL: intrinsic_vror_mask_vx_nxv8i64_i64: +; RV32: # %bb.0: # %entry +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vror.vv v8, v16, v24, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: intrinsic_vror_mask_vx_nxv8i64_i64: +; RV64: # %bb.0: # %entry +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vror.vx v8, v16, a0, v0.t +; RV64-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i64.i64( + %0, + %1, + i64 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv1i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vror.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv2i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vror.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv4i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vror.vi v8, v9, 2, 
v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv8i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vror.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv16i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv16i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vror.vi v8, v10, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv16i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv32i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv32i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vror.vi v8, v12, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv32i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv64i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv64i8.i8( + undef, + %0, + i8 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv64i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv64i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vror.vi v8, v16, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv64i8.i8( + %0, + %1, + i8 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv1i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, 
mu +; CHECK-NEXT: vror.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv2i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vror.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv4i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vror.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv8i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vror.vi v8, v10, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv16i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv16i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vror.vi v8, v12, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv16i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv32i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv32i16.i16( + undef, + %0, + i16 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv32i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vror_mask_vi_nxv32i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vror.vi v8, v16, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv32i16.i16( + %0, + %1, + i16 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv1i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv1i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vror.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv2i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv2i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vror.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv4i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv4i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vror.vi v8, v10, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv8i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv8i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vror.vi v8, v12, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv16i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv16i32.i32( + undef, + %0, + i32 2, + iXLen %1) + + ret %a +} + 
+define @intrinsic_vror_mask_vi_nxv16i32_i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vror.vi v8, v16, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv16i32.i32( + %0, + %1, + i32 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv1i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv1i64.i64( + undef, + %0, + i64 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv1i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vror.vi v8, v9, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv1i64.i64( + %0, + %1, + i64 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv2i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv2i64.i64( + undef, + %0, + i64 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv2i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vror.vi v8, v10, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv2i64.i64( + %0, + %1, + i64 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv4i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv4i64.i64( + undef, + %0, + i64 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv4i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vror.vi v8, v12, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv4i64.i64( + %0, + %1, + i64 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vror_vi_nxv8i64_i64( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vror_vi_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vror.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.nxv8i64.i64( + undef, + %0, + i64 2, + iXLen %1) + + ret %a +} + +define @intrinsic_vror_mask_vi_nxv8i64_i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vror_mask_vi_nxv8i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vror.vi v8, v16, 2, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vror.mask.nxv8i64.i64( + %0, + %1, + i64 2, + %2, + iXLen %3, iXLen 1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsha2ch.ll b/llvm/test/CodeGen/RISCV/rvv/vsha2ch.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsha2ch.ll @@ -0,0 +1,223 @@ +; NOTE: Assertions have been autogenerated by 
utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvknha \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvknha \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vsha2ch.nxv1i32.nxv1i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ch_vv_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ch.nxv1i32.nxv1i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ch.nxv2i32.nxv2i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ch_vv_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ch.nxv2i32.nxv2i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ch.nxv4i32.nxv4i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ch_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v10, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ch.nxv4i32.nxv4i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ch.nxv8i32.nxv8i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ch_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v12, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ch.nxv8i32.nxv8i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ch.nxv16i32.nxv16i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ch_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v16, v24 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ch.nxv16i32.nxv16i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ch.nxv1i64.nxv1i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ch_vv_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ch.nxv1i64.nxv1i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ch.nxv2i64.nxv2i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ch_vv_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v10, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ch.nxv2i64.nxv2i64( 
+ %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ch.nxv4i64.nxv4i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ch_vv_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v12, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ch.nxv4i64.nxv4i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ch.nxv8i64.nxv8i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ch_vv_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ch_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v16, v24 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ch.nxv8i64.nxv8i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll b/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsha2cl.ll @@ -0,0 +1,223 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvknhb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvknhb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vsha2cl.nxv1i32.nxv1i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2cl_vv_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2cl.nxv1i32.nxv1i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2cl.nxv2i32.nxv2i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2cl_vv_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2cl.nxv2i32.nxv2i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2cl.nxv4i32.nxv4i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2cl_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v10, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2cl.nxv4i32.nxv4i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2cl.nxv8i32.nxv8i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2cl_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v12, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2cl.nxv8i32.nxv8i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2cl.nxv16i32.nxv16i32( + , + , + , + iXLen, + iXLen) + +define 
@intrinsic_vsha2cl_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v16, v24 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2cl.nxv16i32.nxv16i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2cl.nxv1i64.nxv1i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2cl_vv_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2cl.nxv1i64.nxv1i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2cl.nxv2i64.nxv2i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2cl_vv_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v10, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2cl.nxv2i64.nxv2i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2cl.nxv4i64.nxv4i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2cl_vv_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v12, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2cl.nxv4i64.nxv4i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2cl.nxv8i64.nxv8i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2cl_vv_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2cl_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma +; CHECK-NEXT: vsha2ch.vv v8, v16, v24 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2cl.nxv8i64.nxv8i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsha2ms.ll b/llvm/test/CodeGen/RISCV/rvv/vsha2ms.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsha2ms.ll @@ -0,0 +1,223 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvknha,+experimental-zvknhb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvknha,+experimental-zvknhb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vsha2ms.nxv1i32.nxv1i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ms_vv_nxv1i32_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsha2ms.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ms.nxv1i32.nxv1i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ms.nxv2i32.nxv2i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ms_vv_nxv2i32_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vsha2ms_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsha2ms.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ms.nxv2i32.nxv2i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ms.nxv4i32.nxv4i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ms_vv_nxv4i32_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsha2ms.vv v8, v10, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ms.nxv4i32.nxv4i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ms.nxv8i32.nxv8i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ms_vv_nxv8i32_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsha2ms.vv v8, v12, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ms.nxv8i32.nxv8i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ms.nxv16i32.nxv16i32( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ms_vv_nxv16i32_nxv16i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, tu, ma +; CHECK-NEXT: vsha2ms.vv v8, v16, v24 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ms.nxv16i32.nxv16i32( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ms.nxv1i64.nxv1i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ms_vv_nxv1i64_nxv1i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m1, tu, ma +; CHECK-NEXT: vsha2ms.vv v8, v9, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ms.nxv1i64.nxv1i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ms.nxv2i64.nxv2i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ms_vv_nxv2i64_nxv2i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m2, tu, ma +; CHECK-NEXT: vsha2ms.vv v8, v10, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ms.nxv2i64.nxv2i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ms.nxv4i64.nxv4i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ms_vv_nxv4i64_nxv4i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e64, m4, tu, ma +; CHECK-NEXT: vsha2ms.vv v8, v12, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ms.nxv4i64.nxv4i64( + %0, + %1, + %2, + iXLen %3, + iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsha2ms.nxv8i64.nxv8i64( + , + , + , + iXLen, + iXLen) + +define @intrinsic_vsha2ms_vv_nxv8i64_nxv8i64( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vsha2ms_vv_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma +; CHECK-NEXT: vsha2ms.vv v8, v16, v24 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsha2ms.nxv8i64.nxv8i64( + %0, + %1, + %2, + 
iXLen %3,
+    iXLen 2)
+
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm3c.ll b/llvm/test/CodeGen/RISCV/rvv/vsm3c.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsm3c.ll
@@ -0,0 +1,125 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvksh \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvksh \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i32> @llvm.riscv.vsm3c.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i32> @intrinsic_vsm3c_vi_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm3c_vi_nxv1i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vsm3c.vi v8, v9, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsm3c.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    iXLen 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsm3c.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i32> @intrinsic_vsm3c_vi_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm3c_vi_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vsm3c.vi v8, v9, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsm3c.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    iXLen 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsm3c.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vsm3c_vi_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm3c_vi_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vsm3c.vi v8, v10, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsm3c.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsm3c.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen,
+  iXLen,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vsm3c_vi_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm3c_vi_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vsm3c.vi v8, v12, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsm3c.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsm3c.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen,
+  iXLen,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vsm3c_vi_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm3c_vi_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
+; CHECK-NEXT:    vsm3c.vi v8, v16, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsm3c.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen 2,
+    iXLen %2,
+    iXLen 2)
+
+  ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm3me.ll b/llvm/test/CodeGen/RISCV/rvv/vsm3me.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsm3me.ll
@@ -0,0 +1,115 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvksh \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvksh \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i32> @llvm.riscv.vsm3me.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen)
+
+define <vscale x 1 x i32> @intrinsic_vsm3me_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm3me_vv_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsm3me.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsm3me.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsm3me.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen)
+
+define <vscale x 2 x i32> @intrinsic_vsm3me_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm3me_vv_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsm3me.vv v8, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsm3me.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsm3me.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vsm3me_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm3me_vv_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsm3me.vv v8, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsm3me.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsm3me.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  iXLen)
+
+define <vscale x 8 x i32> @intrinsic_vsm3me_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm3me_vv_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsm3me.vv v8, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsm3me.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsm3me.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  iXLen)
+
+define <vscale x 16 x i32> @intrinsic_vsm3me_vv_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm3me_vv_nxv16i32_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsm3me.vv v8, v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsm3me.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm4k.ll b/llvm/test/CodeGen/RISCV/rvv/vsm4k.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsm4k.ll
@@ -0,0 +1,115 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvksed \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvksed \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x i32> @llvm.riscv.vsm4k.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 1 x i32> @intrinsic_vsm4k_vi_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vsm4k_vi_nxv1i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsm4k.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsm4k.nxv1i32.i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsm4k.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 2 x i32> @intrinsic_vsm4k_vi_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vsm4k_vi_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsm4k.vi v8, v8, 2
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsm4k.nxv2i32.i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32> %0,
+    iXLen 2,
+    iXLen %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsm4k.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  iXLen,
+  iXLen)
+
+define <vscale x 4 x i32> @intrinsic_vsm4k_vi_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1)
nounwind { +; CHECK-LABEL: intrinsic_vsm4k_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vsm4k.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsm4k.nxv4i32.i32( + undef, + %0, + iXLen 2, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vsm4k.nxv8i32.i32( + , + , + iXLen, + iXLen) + +define @intrinsic_vsm4k_vi_nxv8i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsm4k_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vsm4k.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsm4k.nxv8i32.i32( + undef, + %0, + iXLen 2, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vsm4k.nxv16i32.i32( + , + , + iXLen, + iXLen) + +define @intrinsic_vsm4k_vi_nxv16i32_i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vsm4k_vi_nxv16i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vsm4k.vi v8, v8, 2 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsm4k.nxv16i32.i32( + undef, + %0, + iXLen 2, + iXLen %1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsm4r.ll b/llvm/test/CodeGen/RISCV/rvv/vsm4r.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsm4r.ll @@ -0,0 +1,206 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvksed \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvksed \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vsm4r.vv.nxv1i32( + , + , + iXLen, iXLen); + +define @intrinsic_vsm4r_vv_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsm4r_vv_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsm4r.vv v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsm4r.vv.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsm4r.vv.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vsm4r_vv_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsm4r_vv_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsm4r.vv v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsm4r.vv.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsm4r.vv.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vsm4r_vv_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsm4r_vv_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsm4r.vv v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsm4r.vv.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsm4r.vv.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vsm4r_vv_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsm4r_vv_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsm4r.vv v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsm4r.vv.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsm4r.vv.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vsm4r_vv_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsm4r_vv_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, 
a0, e32, m8, tu, ma +; CHECK-NEXT: vsm4r.vv v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsm4r.vv.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsm4r.vs.nxv1i32( + , + , + iXLen, iXLen); + + +define @intrinsic_vsm4r_vs_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsm4r_vs_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma +; CHECK-NEXT: vsm4r.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsm4r.vs.nxv1i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsm4r.vs.nxv2i32( + , + , + iXLen, iXLen); + +define @intrinsic_vsm4r_vs_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsm4r_vs_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma +; CHECK-NEXT: vsm4r.vs v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsm4r.vs.nxv2i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsm4r.vs.nxv4i32( + , + , + iXLen, iXLen); + +define @intrinsic_vsm4r_vs_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsm4r_vs_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma +; CHECK-NEXT: vsm4r.vs v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsm4r.vs.nxv4i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsm4r.vs.nxv8i32( + , + , + iXLen, iXLen); + +define @intrinsic_vsm4r_vs_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsm4r_vs_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma +; CHECK-NEXT: vsm4r.vs v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsm4r.vs.nxv8i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} + +declare @llvm.riscv.vsm4r.vs.nxv16i32( + , + , + iXLen, iXLen); + +define @intrinsic_vsm4r_vs_nxv16i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vsm4r_vs_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma +; CHECK-NEXT: vsm4r.vs v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vsm4r.vs.nxv16i32( + %0, + %1, + iXLen %2, iXLen 2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsll.ll b/llvm/test/CodeGen/RISCV/rvv/vwsll.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vwsll.ll @@ -0,0 +1,1955 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb \ +; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK + +declare @llvm.riscv.vwsll.nxv1i16.nxv1i8.nxv1i8( + , + , + , + iXLen); + +define @intrinsic_vwsll_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vwsll.vv v10, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv1i16.nxv1i8.nxv1i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vv_nxv1i16_nxv1i8_nxv1i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv1i16_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, 
a0, e8, mf8, ta, mu +; CHECK-NEXT: vwsll.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.nxv1i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv2i16.nxv2i8.nxv2i8( + , + , + , + iXLen); + +define @intrinsic_vwsll_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vv_nxv2i16_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vwsll.vv v10, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv2i16.nxv2i8.nxv2i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vv_nxv2i16_nxv2i8_nxv2i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv2i16_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vwsll.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.nxv2i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv4i16.nxv4i8.nxv4i8( + , + , + , + iXLen); + +define @intrinsic_vwsll_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vv_nxv4i16_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vwsll.vv v10, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv4i16.nxv4i8.nxv4i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vv_nxv4i16_nxv4i8_nxv4i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv4i16_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vwsll.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.nxv4i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv8i16.nxv8i8.nxv8i8( + , + , + , + iXLen); + +define @intrinsic_vwsll_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vv_nxv8i16_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vwsll.vv v10, v8, v9 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv8i16.nxv8i8.nxv8i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vv_nxv8i16_nxv8i8_nxv8i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv8i16_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vwsll.vv v8, v10, v11, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.nxv8i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv16i16.nxv16i8.nxv16i8( + , + , + , + iXLen); + +define @intrinsic_vwsll_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vv_nxv16i16_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vwsll.vv v12, v8, v10 +; CHECK-NEXT: 
vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv16i16.nxv16i8.nxv16i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vv_nxv16i16_nxv16i8_nxv16i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv16i16_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vwsll.vv v8, v12, v14, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.nxv16i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv32i16.nxv32i8.nxv32i8( + , + , + , + iXLen); + +define @intrinsic_vwsll_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vv_nxv32i16_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vwsll.vv v16, v8, v12 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv32i16.nxv32i8.nxv32i8( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vv_nxv32i16_nxv32i8_nxv32i8( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv32i16_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vwsll.vv v8, v16, v20, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.nxv32i8( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv1i32.nxv1i16.nxv1i16( + , + , + , + iXLen); + +define @intrinsic_vwsll_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vv_nxv1i32_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vwsll.vv v10, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv1i32.nxv1i16.nxv1i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vv_nxv1i32_nxv1i16_nxv1i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv1i32_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vwsll.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.nxv1i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv2i32.nxv2i16.nxv2i16( + , + , + , + iXLen); + +define @intrinsic_vwsll_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vv_nxv2i32_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vwsll.vv v10, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv2i32.nxv2i16.nxv2i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vv_nxv2i32_nxv2i16_nxv2i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv2i32_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: 
vwsll.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.nxv2i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv4i32.nxv4i16.nxv4i16( + , + , + , + iXLen); + +define @intrinsic_vwsll_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vv_nxv4i32_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vwsll.vv v10, v8, v9 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv4i32.nxv4i16.nxv4i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vv_nxv4i32_nxv4i16_nxv4i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv4i32_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vwsll.vv v8, v10, v11, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.nxv4i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv8i32.nxv8i16.nxv8i16( + , + , + , + iXLen); + +define @intrinsic_vwsll_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vv_nxv8i32_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vwsll.vv v12, v8, v10 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv8i32.nxv8i16.nxv8i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vv_nxv8i32_nxv8i16_nxv8i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv8i32_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vwsll.vv v8, v12, v14, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.nxv8i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv16i32.nxv16i16.nxv16i16( + , + , + , + iXLen); + +define @intrinsic_vwsll_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vv_nxv16i32_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vwsll.vv v16, v8, v12 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv16i32.nxv16i16.nxv16i16( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vv_nxv16i32_nxv16i16_nxv16i16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv16i32_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vwsll.vv v8, v16, v20, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.nxv16i16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv1i64.nxv1i32.nxv1i32( + , + , + , + iXLen); + +define @intrinsic_vwsll_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vv_nxv1i64_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; 
CHECK-NEXT: vwsll.vv v10, v8, v9 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv1i64.nxv1i32.nxv1i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vv_nxv1i64_nxv1i32_nxv1i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv1i64_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vwsll.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.nxv1i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv2i64.nxv2i32.nxv2i32( + , + , + , + iXLen); + +define @intrinsic_vwsll_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vv_nxv2i64_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vwsll.vv v10, v8, v9 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv2i64.nxv2i32.nxv2i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vv_nxv2i64_nxv2i32_nxv2i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv2i64_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vwsll.vv v8, v10, v11, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.nxv2i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv4i64.nxv4i32.nxv4i32( + , + , + , + iXLen); + +define @intrinsic_vwsll_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vv_nxv4i64_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vwsll.vv v12, v8, v10 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv4i64.nxv4i32.nxv4i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vv_nxv4i64_nxv4i32_nxv4i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv4i64_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vwsll.vv v8, v12, v14, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.nxv4i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv8i64.nxv8i32.nxv8i32( + , + , + , + iXLen); + +define @intrinsic_vwsll_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vv_nxv8i64_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vwsll.vv v16, v8, v12 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv8i64.nxv8i32.nxv8i32( + undef, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vv_nxv8i64_nxv8i32_nxv8i32( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vv_nxv8i64_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, 
a0, e32, m4, ta, mu +; CHECK-NEXT: vwsll.vv v8, v16, v20, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.nxv8i32( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv1i16.nxv1i8.i8( + , + , + i8, + iXLen); + +define @intrinsic_vwsll_vx_nxv1i16_nxv1i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vx_nxv1i16_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vwsll.vx v9, v8, a0 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv1i16.nxv1i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8( + , + , + i8, + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vx_nxv1i16_nxv1i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv1i16_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu +; CHECK-NEXT: vwsll.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv2i16.nxv2i8.i8( + , + , + i8, + iXLen); + +define @intrinsic_vwsll_vx_nxv2i16_nxv2i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vx_nxv2i16_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma +; CHECK-NEXT: vwsll.vx v9, v8, a0 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv2i16.nxv2i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8( + , + , + i8, + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vx_nxv2i16_nxv2i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv2i16_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vwsll.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv4i16.nxv4i8.i8( + , + , + i8, + iXLen); + +define @intrinsic_vwsll_vx_nxv4i16_nxv4i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vx_nxv4i16_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vwsll.vx v9, v8, a0 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv4i16.nxv4i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8( + , + , + i8, + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vx_nxv4i16_nxv4i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv4i16_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vwsll.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv8i16.nxv8i8.i8( + , + , + i8, + iXLen); + +define @intrinsic_vwsll_vx_nxv8i16_nxv8i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vx_nxv8i16_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vwsll.vx v10, v8, a0 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vwsll.nxv8i16.nxv8i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8( + , + , + i8, + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vx_nxv8i16_nxv8i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv8i16_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vwsll.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv16i16.nxv16i8.i8( + , + , + i8, + iXLen); + +define @intrinsic_vwsll_vx_nxv16i16_nxv16i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vx_nxv16i16_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vwsll.vx v12, v8, a0 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv16i16.nxv16i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8( + , + , + i8, + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vx_nxv16i16_nxv16i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv16i16_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vwsll.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv32i16.nxv32i8.i8( + , + , + i8, + iXLen); + +define @intrinsic_vwsll_vx_nxv32i16_nxv32i8_i8( %0, i8 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vx_nxv32i16_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma +; CHECK-NEXT: vwsll.vx v16, v8, a0 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv32i16.nxv32i8.i8( + undef, + %0, + i8 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8( + , + , + i8, + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vx_nxv32i16_nxv32i8_i8( %0, %1, i8 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv32i16_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu +; CHECK-NEXT: vwsll.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv1i32.nxv1i16.i16( + , + , + i16, + iXLen); + +define @intrinsic_vwsll_vx_nxv1i32_nxv1i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vx_nxv1i32_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma +; CHECK-NEXT: vwsll.vx v9, v8, a0 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv1i32.nxv1i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16( + , + , + i16, + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vx_nxv1i32_nxv1i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv1i32_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu +; CHECK-NEXT: vwsll.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + 
iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv2i32.nxv2i16.i16( + , + , + i16, + iXLen); + +define @intrinsic_vwsll_vx_nxv2i32_nxv2i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vx_nxv2i32_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma +; CHECK-NEXT: vwsll.vx v9, v8, a0 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv2i32.nxv2i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16( + , + , + i16, + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vx_nxv2i32_nxv2i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv2i32_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu +; CHECK-NEXT: vwsll.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv4i32.nxv4i16.i16( + , + , + i16, + iXLen); + +define @intrinsic_vwsll_vx_nxv4i32_nxv4i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vx_nxv4i32_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma +; CHECK-NEXT: vwsll.vx v10, v8, a0 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv4i32.nxv4i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16( + , + , + i16, + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vx_nxv4i32_nxv4i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv4i32_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu +; CHECK-NEXT: vwsll.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv8i32.nxv8i16.i16( + , + , + i16, + iXLen); + +define @intrinsic_vwsll_vx_nxv8i32_nxv8i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vx_nxv8i32_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma +; CHECK-NEXT: vwsll.vx v12, v8, a0 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv8i32.nxv8i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16( + , + , + i16, + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vx_nxv8i32_nxv8i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv8i32_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vwsll.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv16i32.nxv16i16.i16( + , + , + i16, + iXLen); + +define @intrinsic_vwsll_vx_nxv16i32_nxv16i16_i16( %0, i16 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vx_nxv16i32_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma +; CHECK-NEXT: vwsll.vx v16, v8, a0 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv16i32.nxv16i16.i16( + undef, + %0, + i16 %1, + iXLen %2) + + ret %a +} + +declare 
@llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16( + , + , + i16, + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vx_nxv16i32_nxv16i16_i16( %0, %1, i16 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv16i32_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu +; CHECK-NEXT: vwsll.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv1i64.nxv1i32.i32( + , + , + i32, + iXLen); + +define @intrinsic_vwsll_vx_nxv1i64_nxv1i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vx_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vwsll.vx v9, v8, a0 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv1i64.nxv1i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32( + , + , + i32, + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vx_nxv1i64_nxv1i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu +; CHECK-NEXT: vwsll.vx v8, v9, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv2i64.nxv2i32.i32( + , + , + i32, + iXLen); + +define @intrinsic_vwsll_vx_nxv2i64_nxv2i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vx_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vwsll.vx v10, v8, a0 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv2i64.nxv2i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32( + , + , + i32, + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vx_nxv2i64_nxv2i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu +; CHECK-NEXT: vwsll.vx v8, v10, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv4i64.nxv4i32.i32( + , + , + i32, + iXLen); + +define @intrinsic_vwsll_vx_nxv4i64_nxv4i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vx_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vwsll.vx v12, v8, a0 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv4i64.nxv4i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32( + , + , + i32, + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vx_nxv4i64_nxv4i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu +; CHECK-NEXT: vwsll.vx v8, v12, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vwsll.nxv8i64.nxv8i32.i32( + , + , + i32, + iXLen); + 
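+; Operand order, as used in the calls above and below (a descriptive note, not
+; part of the autogenerated checks): the unmasked vwsll intrinsics take
+; (passthru, vector operand, scalar shift amount, vl), and the masked variants
+; take (passthru, vector operand, scalar shift amount, mask, vl, policy). The
+; vector types are encoded in the intrinsic name, e.g. nxv8i64.nxv8i32.i32
+; denotes a <vscale x 8 x i64> result, a <vscale x 8 x i32> source, and an
+; i32 shift amount.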
+define @intrinsic_vwsll_vx_nxv8i64_nxv8i32( %0, i32 %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vx_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vwsll.vx v16, v8, a0 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv8i64.nxv8i32.i32( + undef, + %0, + i32 %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32( + , + , + i32, + , + iXLen, + iXLen); + +define @intrinsic_vwsll_mask_vx_nxv8i64_nxv8i32( %0, %1, i32 %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vx_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu +; CHECK-NEXT: vwsll.vx v8, v16, a0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +define @intrinsic_vwsll_vi_nxv1i16_nxv1i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vi_nxv1i16_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vwsll.vi v9, v8, 1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv1i16.nxv1i8.i8( + undef, + %0, + i8 1, + iXLen %1) + + ret %a +} + +define @intrinsic_vwsll_mask_vi_nxv1i16_nxv1i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv1i16_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vwsll.vi v8, v9, 1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv1i16.nxv1i8.i8( + %0, + %1, + i8 1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vwsll_vi_nxv2i16_nxv2i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vi_nxv2i16_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vwsll.vi v9, v8, 1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv2i16.nxv2i8.i8( + undef, + %0, + i8 1, + iXLen %1) + + ret %a +} + +define @intrinsic_vwsll_mask_vi_nxv2i16_nxv2i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv2i16_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vwsll.vi v8, v9, 1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv2i16.nxv2i8.i8( + %0, + %1, + i8 1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vwsll_vi_nxv4i16_nxv4i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vi_nxv4i16_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vwsll.vi v9, v8, 1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv4i16.nxv4i8.i8( + undef, + %0, + i8 1, + iXLen %1) + + ret %a +} + +define @intrinsic_vwsll_mask_vi_nxv4i16_nxv4i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv4i16_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vwsll.vi v8, v9, 1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv4i16.nxv4i8.i8( + %0, + %1, + i8 1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vwsll_vi_nxv8i16_nxv8i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vi_nxv8i16_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vwsll.vi v10, v8, 1 +; 
CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv8i16.nxv8i8.i8( + undef, + %0, + i8 1, + iXLen %1) + + ret %a +} + +define @intrinsic_vwsll_mask_vi_nxv8i16_nxv8i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv8i16_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vwsll.vi v8, v10, 1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv8i16.nxv8i8.i8( + %0, + %1, + i8 1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vwsll_vi_nxv16i16_nxv16i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vi_nxv16i16_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vwsll.vi v12, v8, 1 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv16i16.nxv16i8.i8( + undef, + %0, + i8 1, + iXLen %1) + + ret %a +} + +define @intrinsic_vwsll_mask_vi_nxv16i16_nxv16i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv16i16_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vwsll.vi v8, v12, 1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv16i16.nxv16i8.i8( + %0, + %1, + i8 1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vwsll_vi_nxv32i16_nxv32i8_i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vi_nxv32i16_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vwsll.vi v16, v8, 1 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv32i16.nxv32i8.i8( + undef, + %0, + i8 1, + iXLen %1) + + ret %a +} + +define @intrinsic_vwsll_mask_vi_nxv32i16_nxv32i8_i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv32i16_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vwsll.vi v8, v16, 1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv32i16.nxv32i8.i8( + %0, + %1, + i8 1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vwsll_vi_nxv1i32_nxv1i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vi_nxv1i32_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vwsll.vi v9, v8, 1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv1i32.nxv1i16.i16( + undef, + %0, + i16 1, + iXLen %1) + + ret %a +} + +define @intrinsic_vwsll_mask_vi_nxv1i32_nxv1i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv1i32_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vwsll.vi v8, v9, 1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv1i32.nxv1i16.i16( + %0, + %1, + i16 1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vwsll_vi_nxv2i32_nxv2i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vi_nxv2i32_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vwsll.vi v9, v8, 1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv2i32.nxv2i16.i16( + undef, + %0, + i16 1, + iXLen %1) + + ret %a +} + +define @intrinsic_vwsll_mask_vi_nxv2i32_nxv2i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv2i32_nxv2i16_i16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vwsll.vi v8, v9, 1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv2i32.nxv2i16.i16( + %0, + %1, + i16 1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vwsll_vi_nxv4i32_nxv4i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vi_nxv4i32_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vwsll.vi v10, v8, 1 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv4i32.nxv4i16.i16( + undef, + %0, + i16 1, + iXLen %1) + + ret %a +} + +define @intrinsic_vwsll_mask_vi_nxv4i32_nxv4i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv4i32_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vwsll.vi v8, v10, 1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv4i32.nxv4i16.i16( + %0, + %1, + i16 1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vwsll_vi_nxv8i32_nxv8i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vi_nxv8i32_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vwsll.vi v12, v8, 1 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv8i32.nxv8i16.i16( + undef, + %0, + i16 1, + iXLen %1) + + ret %a +} + +define @intrinsic_vwsll_mask_vi_nxv8i32_nxv8i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv8i32_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vwsll.vi v8, v12, 1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv8i32.nxv8i16.i16( + %0, + %1, + i16 1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vwsll_vi_nxv16i32_nxv16i16_i16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vi_nxv16i32_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vwsll.vi v16, v8, 1 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv16i32.nxv16i16.i16( + undef, + %0, + i16 1, + iXLen %1) + + ret %a +} + +define @intrinsic_vwsll_mask_vi_nxv16i32_nxv16i16_i16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv16i32_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vwsll.vi v8, v16, 1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv16i32.nxv16i16.i16( + %0, + %1, + i16 1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vwsll_vi_nxv1i64_nxv1i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vi_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vwsll.vi v9, v8, 1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv1i64.nxv1i32.i32( + undef, + %0, + i32 1, + iXLen %1) + + ret %a +} + +define @intrinsic_vwsll_mask_vi_nxv1i64_nxv1i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vwsll.vi v8, v9, 1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv1i64.nxv1i32.i32( + %0, + %1, + i32 1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define 
@intrinsic_vwsll_vi_nxv2i64_nxv2i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vi_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vwsll.vi v10, v8, 1 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv2i64.nxv2i32.i32( + undef, + %0, + i32 1, + iXLen %1) + + ret %a +} + +define @intrinsic_vwsll_mask_vi_nxv2i64_nxv2i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vwsll.vi v8, v10, 1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv2i64.nxv2i32.i32( + %0, + %1, + i32 1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vwsll_vi_nxv4i64_nxv4i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vi_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vwsll.vi v12, v8, 1 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv4i64.nxv4i32.i32( + undef, + %0, + i32 1, + iXLen %1) + + ret %a +} + +define @intrinsic_vwsll_mask_vi_nxv4i64_nxv4i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vwsll.vi v8, v12, 1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv4i64.nxv4i32.i32( + %0, + %1, + i32 1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vwsll_vi_nxv8i64_nxv8i32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vwsll_vi_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vwsll.vi v16, v8, 1 +; CHECK-NEXT: vmv8r.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.nxv8i64.nxv8i32.i32( + undef, + %0, + i32 1, + iXLen %1) + + ret %a +} + +define @intrinsic_vwsll_mask_vi_nxv8i64_nxv8i32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vwsll_mask_vi_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vwsll.vi v8, v16, 1, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32( + %0, + %1, + i32 1, + %2, + iXLen %3, iXLen 1) + + ret %a +}
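For readability, here is a minimal sketch of the final masked vwsll.vi test above with its scalable-vector types spelled out, assuming the standard mapping of the nxv8i64/nxv8i32 name components to <vscale x 8 x i64>/<vscale x 8 x i32> and a <vscale x 8 x i1> mask; iXLen is replaced with i32 or i64 by the sed in the RUN lines, as elsewhere in the file:

declare <vscale x 8 x i64> @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32(
  <vscale x 8 x i64>, <vscale x 8 x i32>, i32, <vscale x 8 x i1>, iXLen, iXLen)

define <vscale x 8 x i64> @intrinsic_vwsll_mask_vi_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
entry:
  ; (passthru, source, immediate shift of 1, mask, vl, policy)
  %a = call <vscale x 8 x i64> @llvm.riscv.vwsll.mask.nxv8i64.nxv8i32.i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    i32 1,
    <vscale x 8 x i1> %2,
    iXLen %3, iXLen 1)

  ret <vscale x 8 x i64> %a
}

The trailing iXLen pair is (vl, policy); policy value 1 requests tail-agnostic, mask-undisturbed behaviour, which matches the "ta, mu" configuration in the autogenerated vsetvli checks for the masked tests.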