diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -469,6 +469,20 @@
                     [LLVMMatchType<0>, llvm_anyvector_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic;
+  // For atomic operations without mask
+  // Input: (base, index, value, vl)
+  class RISCVAMONoMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty, llvm_anyvector_ty,
+                     llvm_anyint_ty],
+                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
+  // For atomic operations with mask
+  // Input: (base, index, value, mask, vl)
+  class RISCVAMOMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty, llvm_anyvector_ty,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                    [NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
 
   multiclass RISCVUSLoad {
     def "int_riscv_" # NAME : RISCVUSLoad;
@@ -568,6 +582,10 @@
     def "int_riscv_" # NAME : RISCVConversionNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVConversionMask;
   }
+  multiclass RISCVAMO {
+    def "int_riscv_" # NAME : RISCVAMONoMask;
+    def "int_riscv_" # NAME # "_mask" : RISCVAMOMask;
+  }
 
   defm vle : RISCVUSLoad;
   defm vleff : RISCVUSLoad;
@@ -578,6 +596,16 @@
   defm vsxe: RISCVIStore;
   defm vsuxe: RISCVIStore;
 
+  defm vamoswap : RISCVAMO;
+  defm vamoadd : RISCVAMO;
+  defm vamoxor : RISCVAMO;
+  defm vamoand : RISCVAMO;
+  defm vamoor : RISCVAMO;
+  defm vamomin : RISCVAMO;
+  defm vamomax : RISCVAMO;
+  defm vamominu : RISCVAMO;
+  defm vamomaxu : RISCVAMO;
+
   defm vadd : RISCVBinaryAAX;
   defm vsub : RISCVBinaryAAX;
   defm vrsub : RISCVBinaryAAX;
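The intrinsic classes above fix the operand order as (base, index, value, vl), with the mask inserted before vl in the masked variant. As a minimal sketch only, using hypothetical value names and types matching the tests further below, an unmasked vamoadd call in IR looks like:

  %old = call <vscale x 1 x i32> @llvm.riscv.vamoadd.nxv1i32.nxv1i16(
           <vscale x 1 x i32>* %base,  ; base address operand
           <vscale x 1 x i16> %index,  ; index vector (EEW = 16)
           <vscale x 1 x i32> %value,  ; value operand, same vector type as the result
           i32 %vl)                    ; vector length (i32 on RV32, i64 on RV64)

The result comes back through the tied destination register group, which is why the pseudo instructions below constrain $vd_wd = $vd.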
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -73,10 +73,38 @@
   list<LMULInfo> m = [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8];
 }
 
+class MxSet<int eew> {
+  list<LMULInfo> m = !cond(!eq(eew, 8) : [V_MF8, V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
+                           !eq(eew, 16) : [V_MF4, V_MF2, V_M1, V_M2, V_M4, V_M8],
+                           !eq(eew, 32) : [V_MF2, V_M1, V_M2, V_M4, V_M8],
+                           !eq(eew, 64) : [V_M1, V_M2, V_M4, V_M8]);
+}
+
 class shift_amount<int num> {
   int val = !if(!eq(num, 1), 0, !add(1, shift_amount<!srl(num, 1)>.val));
 }
 
+class octuple_from_str<string MX> {
+  int ret = !cond(!eq(MX, "MF8") : 1,
+                  !eq(MX, "MF4") : 2,
+                  !eq(MX, "MF2") : 4,
+                  !eq(MX, "M1") : 8,
+                  !eq(MX, "M2") : 16,
+                  !eq(MX, "M4") : 32,
+                  !eq(MX, "M8") : 64);
+}
+
+class octuple_to_str<int octuple> {
+  string ret = !if(!eq(octuple, 1), "MF8",
+               !if(!eq(octuple, 2), "MF4",
+               !if(!eq(octuple, 4), "MF2",
+               !if(!eq(octuple, 8), "M1",
+               !if(!eq(octuple, 16), "M2",
+               !if(!eq(octuple, 32), "M4",
+               !if(!eq(octuple, 64), "M8",
+               "NoDef")))))));
+}
+
 // Output pattern for X0 used to represent VLMAX in the pseudo instructions.
 def VLMax : OutPatFrag<(ops), (XLenVT X0)>;
 
@@ -826,6 +854,74 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoAMOWDNoMask<VReg RetClass,
+                         VReg Op1Class> :
+        Pseudo<(outs GetVRegNoV0<RetClass>.R:$vd_wd),
+               (ins GPR:$rs1,
+                    Op1Class:$vs2,
+                    GetVRegNoV0<RetClass>.R:$vd,
+                    GPR:$vl, ixlenimm:$sew), []>,
+        RISCVVPseudo {
+  let mayLoad = 1;
+  let mayStore = 1;
+  let hasSideEffects = 1;
+  let usesCustomInserter = 1;
+  let Constraints = "$vd_wd = $vd";
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoAMOWDMask<VReg RetClass,
+                       VReg Op1Class> :
+        Pseudo<(outs GetVRegNoV0<RetClass>.R:$vd_wd),
+               (ins GPR:$rs1,
+                    Op1Class:$vs2,
+                    GetVRegNoV0<RetClass>.R:$vd,
+                    VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
+        RISCVVPseudo {
+  let mayLoad = 1;
+  let mayStore = 1;
+  let hasSideEffects = 1;
+  let usesCustomInserter = 1;
+  let Constraints = "$vd_wd = $vd";
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+multiclass VPseudoAMOEI<int eew> {
+  // Standard scalar AMO supports 32, 64, and 128 Mem data bits,
+  // and in the base vector "V" extension, only SEW up to ELEN = max(XLEN, FLEN)
+  // are required to be supported.
+  // Therefore only [32, 64] is allowed here.
+  foreach sew = [32, 64] in {
+    foreach lmul = MxSet<sew>.m in {
+      defvar octuple_lmul = octuple_from_str<lmul.MX>.ret;
+      // Calculate emul = eew * lmul / sew
+      defvar octuple_emul = !sra(!mul(eew, octuple_lmul), shift_amount<sew>.val);
+      if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
+        defvar emulMX = octuple_to_str<octuple_emul>.ret;
+        defvar lmulMX = octuple_to_str<octuple_lmul>.ret;
+        defvar emul = !cast<LMULInfo>("V_" # emulMX);
+        defvar lmul = !cast<LMULInfo>("V_" # lmulMX);
+        let VLMul = lmul.value in {
+          def "_WD_" # lmulMX # "_" # emulMX : VPseudoAMOWDNoMask<lmul.vrclass, emul.vrclass>;
+          def "_WD_" # lmulMX # "_" # emulMX # "_MASK" : VPseudoAMOWDMask<lmul.vrclass, emul.vrclass>;
+        }
+      }
+    }
+  }
+}
+
+multiclass VPseudoAMO {
+  foreach eew = EEWList in
+    defm "EI" # eew : VPseudoAMOEI<eew>;
+}
+
 multiclass VPseudoUSLoad {
   foreach lmul = MxList.m in {
     defvar LInfo = lmul.MX;
@@ -1508,6 +1604,42 @@
                    (mask_type V0), (NoX0 GPR:$vl), sew)>;
 
+class VPatAMOWDNoMask<string intrinsic_name,
+                      string inst,
+                      ValueType result_type,
+                      ValueType op1_type,
+                      int sew,
+                      LMULInfo vlmul,
+                      LMULInfo emul,
+                      VReg op1_reg_class> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+                      GPR:$rs1,
+                      (op1_type op1_reg_class:$vs2),
+                      (result_type vlmul.vrclass:$vd),
+                      (XLenVT GPR:$vl))),
+      (!cast<Instruction>(inst # "_WD_" # vlmul.MX # "_" # emul.MX)
+        $rs1, $vs2, $vd,
+        (NoX0 GPR:$vl), sew)>;
+
+class VPatAMOWDMask<string intrinsic_name,
+                    string inst,
+                    ValueType result_type,
+                    ValueType op1_type,
+                    ValueType mask_type,
+                    int sew,
+                    LMULInfo vlmul,
+                    LMULInfo emul,
+                    VReg op1_reg_class> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name # "_mask")
+                      GPR:$rs1,
+                      (op1_type op1_reg_class:$vs2),
+                      (result_type vlmul.vrclass:$vd),
+                      (mask_type V0),
+                      (XLenVT GPR:$vl))),
+      (!cast<Instruction>(inst # "_WD_" # vlmul.MX # "_" # emul.MX # "_MASK")
+        $rs1, $vs2, $vd,
+        (mask_type V0), (NoX0 GPR:$vl), sew)>;
+
 multiclass VPatUSLoad
 
+multiclass VPatAMOWD<string intrinsic,
+                     string inst,
+                     ValueType result_type,
+                     ValueType op1_type,
+                     ValueType mask_type,
+                     int sew,
+                     LMULInfo vlmul,
+                     LMULInfo emul,
+                     VReg op1_reg_class>
+{
+  def : VPatAMOWDNoMask<intrinsic, inst, result_type, op1_type,
+                        sew, vlmul, emul, op1_reg_class>;
+  def : VPatAMOWDMask<intrinsic, inst, result_type, op1_type,
+                      mask_type, sew, vlmul, emul, op1_reg_class>;
+}
+
+multiclass VPatAMOV_WD<string intrinsic,
+                       string inst,
+                       list<VTypeInfo> vtilist> {
+  foreach eew = EEWList in {
+    foreach vti = vtilist in {
+      if !or(!eq(vti.SEW, 32), !eq(vti.SEW, 64)) then {
+        defvar octuple_lmul = octuple_from_str<vti.LMul.MX>.ret;
+        // Calculate emul = eew * lmul / sew
+        defvar octuple_emul = !sra(!mul(eew, octuple_lmul), shift_amount<vti.SEW>.val);
+        if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then {
+          defvar emulMX = octuple_to_str<octuple_emul>.ret;
+          defvar offsetVti = !cast<VTypeInfo>("VI" # eew # emulMX);
+          defvar inst_ei = inst # "EI" # eew;
+          defm : VPatAMOWD<intrinsic, inst_ei,
+                           vti.Vector, offsetVti.Vector, vti.Mask, vti.SEW,
+                           vti.LMul, offsetVti.LMul, offsetVti.RegClass>;
+        }
+      }
+    }
+  }
+}
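To make the emul computation above concrete (illustration only, derived from the helpers shown): for EI16 indices with SEW=32 data at LMUL=M1, octuple_lmul is 8, so octuple_emul = (16 * 8) >> shift_amount<32>.val = 128 >> 5 = 4, which octuple_to_str maps back to MF2; the index operand therefore uses a half-register group, matching the nxv2i16 index paired with nxv2i32 data in the tests below. A combination such as EI64 indices with SEW=32 data at LMUL=M8 evaluates to (64 * 64) >> 5 = 128, falls outside [1, 64], and is skipped.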
 
 //===----------------------------------------------------------------------===//
 // Pseudo instructions
 //===----------------------------------------------------------------------===//
 
@@ -2460,6 +2630,19 @@
 }
 
 //===----------------------------------------------------------------------===//
+// 8. Vector AMO Operations
+//===----------------------------------------------------------------------===//
+defm PseudoVAMOSWAP : VPseudoAMO;
+defm PseudoVAMOADD : VPseudoAMO;
+defm PseudoVAMOXOR : VPseudoAMO;
+defm PseudoVAMOAND : VPseudoAMO;
+defm PseudoVAMOOR : VPseudoAMO;
+defm PseudoVAMOMIN : VPseudoAMO;
+defm PseudoVAMOMAX : VPseudoAMO;
+defm PseudoVAMOMINU : VPseudoAMO;
+defm PseudoVAMOMAXU : VPseudoAMO;
+
+//===----------------------------------------------------------------------===//
 // 12. Vector Integer Arithmetic Instructions
 //===----------------------------------------------------------------------===//
 
@@ -3023,12 +3206,32 @@
     }
   }
 }
+} // Predicates = [HasStdExtV]
+//===----------------------------------------------------------------------===//
+// 8. Vector AMO Operations
+//===----------------------------------------------------------------------===//
+let Predicates = [HasStdExtZvamo] in {
+  defm "" : VPatAMOV_WD<"int_riscv_vamoswap", "PseudoVAMOSWAP", AllIntegerVectors>;
+  defm "" : VPatAMOV_WD<"int_riscv_vamoadd", "PseudoVAMOADD", AllIntegerVectors>;
+  defm "" : VPatAMOV_WD<"int_riscv_vamoxor", "PseudoVAMOXOR", AllIntegerVectors>;
+  defm "" : VPatAMOV_WD<"int_riscv_vamoand", "PseudoVAMOAND", AllIntegerVectors>;
+  defm "" : VPatAMOV_WD<"int_riscv_vamoor", "PseudoVAMOOR", AllIntegerVectors>;
+  defm "" : VPatAMOV_WD<"int_riscv_vamomin", "PseudoVAMOMIN", AllIntegerVectors>;
+  defm "" : VPatAMOV_WD<"int_riscv_vamomax", "PseudoVAMOMAX", AllIntegerVectors>;
+  defm "" : VPatAMOV_WD<"int_riscv_vamominu", "PseudoVAMOMINU", AllIntegerVectors>;
+  defm "" : VPatAMOV_WD<"int_riscv_vamomaxu", "PseudoVAMOMAXU", AllIntegerVectors>;
+} // Predicates = [HasStdExtZvamo]
+
+let Predicates = [HasStdExtZvamo, HasStdExtF] in {
+  defm "" : VPatAMOV_WD<"int_riscv_vamoswap", "PseudoVAMOSWAP", AllFloatVectors>;
+} // Predicates = [HasStdExtZvamo, HasStdExtF]
 
 //===----------------------------------------------------------------------===//
 // 12. Vector Integer Arithmetic Instructions
 //===----------------------------------------------------------------------===//
 
+let Predicates = [HasStdExtV] in {
 //===----------------------------------------------------------------------===//
 // 12.1.
Vector Single-Width Integer Add and Subtract //===----------------------------------------------------------------------===// diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv32.ll @@ -0,0 +1,734 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vamoadd.nxv1i32.nxv1i32( + *, + , + , + i32); + +define @intrinsic_vamoadd_v_nxv1i32_nxv1i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoaddei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv1i32.nxv1i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32( + *, + , + , + , + i32); + +define @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoaddei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv2i32.nxv2i32( + *, + , + , + i32); + +define @intrinsic_vamoadd_v_nxv2i32_nxv2i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoaddei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv2i32.nxv2i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32( + *, + , + , + , + i32); + +define @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoaddei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv4i32.nxv4i32( + *, + , + , + i32); + +define @intrinsic_vamoadd_v_nxv4i32_nxv4i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoaddei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv4i32.nxv4i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32( + *, + , + , + , + i32); + +define @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoaddei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32( + 
*%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv8i32.nxv8i32( + *, + , + , + i32); + +define @intrinsic_vamoadd_v_nxv8i32_nxv8i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoaddei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv8i32.nxv8i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32( + *, + , + , + , + i32); + +define @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoaddei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv16i32.nxv16i32( + *, + , + , + i32); + +define @intrinsic_vamoadd_v_nxv16i32_nxv16i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoaddei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv16i32.nxv16i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32( + *, + , + , + , + i32); + +define @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoaddei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv1i32.nxv1i16( + *, + , + , + i32); + +define @intrinsic_vamoadd_v_nxv1i32_nxv1i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoaddei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv1i32.nxv1i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16( + *, + , + , + , + i32); + +define @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoaddei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv2i32.nxv2i16( + *, + , + , + i32); + +define @intrinsic_vamoadd_v_nxv2i32_nxv2i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: 
vamoaddei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv2i32.nxv2i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16( + *, + , + , + , + i32); + +define @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoaddei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv4i32.nxv4i16( + *, + , + , + i32); + +define @intrinsic_vamoadd_v_nxv4i32_nxv4i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoaddei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv4i32.nxv4i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16( + *, + , + , + , + i32); + +define @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoaddei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv8i32.nxv8i16( + *, + , + , + i32); + +define @intrinsic_vamoadd_v_nxv8i32_nxv8i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoaddei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv8i32.nxv8i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16( + *, + , + , + , + i32); + +define @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoaddei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv16i32.nxv16i16( + *, + , + , + i32); + +define @intrinsic_vamoadd_v_nxv16i32_nxv16i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoaddei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv16i32.nxv16i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16( + *, + , + , + , + i32); + +define @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv16i32_nxv16i16: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoaddei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv1i32.nxv1i8( + *, + , + , + i32); + +define @intrinsic_vamoadd_v_nxv1i32_nxv1i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoaddei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv1i32.nxv1i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8( + *, + , + , + , + i32); + +define @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoaddei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv2i32.nxv2i8( + *, + , + , + i32); + +define @intrinsic_vamoadd_v_nxv2i32_nxv2i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoaddei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv2i32.nxv2i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8( + *, + , + , + , + i32); + +define @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoaddei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv4i32.nxv4i8( + *, + , + , + i32); + +define @intrinsic_vamoadd_v_nxv4i32_nxv4i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoaddei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv4i32.nxv4i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8( + *, + , + , + , + i32); + +define @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoaddei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv8i32.nxv8i8( + *, + , + , + i32); + +define @intrinsic_vamoadd_v_nxv8i32_nxv8i8( *%0, %1, %2, i32 %3) nounwind { +; 
CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoaddei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv8i32.nxv8i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8( + *, + , + , + , + i32); + +define @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoaddei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv16i32.nxv16i8( + *, + , + , + i32); + +define @intrinsic_vamoadd_v_nxv16i32_nxv16i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoaddei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv16i32.nxv16i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8( + *, + , + , + , + i32); + +define @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoaddei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vamoadd-rv64.ll @@ -0,0 +1,1714 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vamoadd.nxv1i32.nxv1i64( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv1i32_nxv1i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoaddei64.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv1i32.nxv1i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoaddei64.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv2i32.nxv2i64( + *, + , + , + i64); + +define 
@intrinsic_vamoadd_v_nxv2i32_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoaddei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv2i32.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoaddei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv4i32.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv4i32_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoaddei64.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv4i32.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoaddei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv8i32.nxv8i64( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv8i32_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamoaddei64.v v28, (a0), v16, v28 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv8i32.nxv8i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamoaddei64.v v28, (a0), v16, v28, v0.t +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv1i64.nxv1i64( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv1i64_nxv1i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoaddei64.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vamoadd.nxv1i64.nxv1i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoaddei64.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv2i64.nxv2i64( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv2i64_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoaddei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv2i64.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoaddei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv4i64.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv4i64_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoaddei64.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv4i64.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoaddei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv8i64.nxv8i64( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv8i64_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoaddei64.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv8i64.nxv8i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, 
a2, e64,m8,tu,mu +; CHECK-NEXT: vamoaddei64.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv1i32.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv1i32_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoaddei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv1i32.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoaddei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv2i32.nxv2i32( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv2i32_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoaddei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv2i32.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoaddei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv4i32.nxv4i32( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv4i32_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoaddei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv4i32.nxv4i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoaddei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv8i32.nxv8i32( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv8i32_nxv8i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e32,m4,tu,mu +; CHECK-NEXT: vamoaddei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv8i32.nxv8i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoaddei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv16i32.nxv16i32( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv16i32_nxv16i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoaddei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv16i32.nxv16i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoaddei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv1i64.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv1i64_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoaddei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv1i64.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoaddei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv2i64.nxv2i32( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv2i64_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoaddei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv2i64.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32( + *, + , + , + , + i64); + +define 
@intrinsic_vamoadd_mask_v_nxv2i64_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoaddei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv4i64.nxv4i32( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv4i64_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoaddei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv4i64.nxv4i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoaddei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv8i64.nxv8i32( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv8i64_nxv8i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoaddei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv8i64.nxv8i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoaddei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv1i32.nxv1i16( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv1i32_nxv1i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoaddei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv1i32.nxv1i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoaddei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vamoadd.mask.nxv1i32.nxv1i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv2i32.nxv2i16( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv2i32_nxv2i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoaddei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv2i32.nxv2i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoaddei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv4i32.nxv4i16( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv4i32_nxv4i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoaddei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv4i32.nxv4i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoaddei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv8i32.nxv8i16( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv8i32_nxv8i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoaddei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv8i32.nxv8i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoaddei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv16i32.nxv16i16( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv16i32_nxv16i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoaddei16.v v8, (a0), v16, v8 +; CHECK-NEXT: 
vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv16i32.nxv16i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoaddei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv1i64.nxv1i16( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv1i64_nxv1i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoaddei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv1i64.nxv1i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoaddei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv2i64.nxv2i16( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv2i64_nxv2i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoaddei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv2i64.nxv2i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoaddei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv4i64.nxv4i16( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv4i64_nxv4i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoaddei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv4i64.nxv4i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, 
a1, e64,m4,tu,mu +; CHECK-NEXT: vamoaddei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv8i64.nxv8i16( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv8i64_nxv8i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoaddei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv8i64.nxv8i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoaddei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv1i32.nxv1i8( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv1i32_nxv1i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoaddei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv1i32.nxv1i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv1i32_nxv1i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoaddei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv1i32.nxv1i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv2i32.nxv2i8( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv2i32_nxv2i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoaddei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv2i32.nxv2i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv2i32_nxv2i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoaddei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv2i32.nxv2i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv4i32.nxv4i8( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv4i32_nxv4i8( *%0, %1, %2, i64 
%3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoaddei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv4i32.nxv4i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv4i32_nxv4i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoaddei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv4i32.nxv4i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv8i32.nxv8i8( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv8i32_nxv8i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoaddei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv8i32.nxv8i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv8i32_nxv8i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoaddei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv8i32.nxv8i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv16i32.nxv16i8( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv16i32_nxv16i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoaddei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv16i32.nxv16i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv16i32_nxv16i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoaddei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv16i32.nxv16i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv1i64.nxv1i8( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv1i64_nxv1i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoaddei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv1i64.nxv1i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare 
@llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv1i64_nxv1i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoaddei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv1i64.nxv1i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv2i64.nxv2i8( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv2i64_nxv2i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoaddei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv2i64.nxv2i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv2i64_nxv2i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoaddei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv2i64.nxv2i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv4i64.nxv4i8( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv4i64_nxv4i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoaddei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv4i64.nxv4i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv4i64_nxv4i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoaddei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.mask.nxv4i64.nxv4i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoadd.nxv8i64.nxv8i8( + *, + , + , + i64); + +define @intrinsic_vamoadd_v_nxv8i64_nxv8i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_v_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoaddei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoadd.nxv8i64.nxv8i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8( + *, + , + , + , + i64); + +define @intrinsic_vamoadd_mask_v_nxv8i64_nxv8i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoadd_mask_v_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoaddei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr 
zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vamoadd.mask.nxv8i64.nxv8i8(
+    <vscale x 8 x i64> *%0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoand-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoand-rv32.ll
@@ -0,0 +1,734 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vamoand_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vamoandei32.v v17, (a0), v16, v17
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> *%0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vamoand_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vamoandei32.v v17, (a0), v16, v17, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> *%0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vamoand_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vamoandei32.v v17, (a0), v16, v17
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> *%0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vamoand_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vamoandei32.v v17, (a0), v16, v17, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> *%0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>*,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vamoand_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vamoandei32.v v18, (a0), v16, v18
+; CHECK-NEXT:    vmv2r.v v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> *%0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>*,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vamoand_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vamoandei32.v v18, (a0), v16, v18, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> *%0,
%1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv8i32.nxv8i32( + *, + , + , + i32); + +define @intrinsic_vamoand_v_nxv8i32_nxv8i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoandei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv8i32.nxv8i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32( + *, + , + , + , + i32); + +define @intrinsic_vamoand_mask_v_nxv8i32_nxv8i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoandei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv16i32.nxv16i32( + *, + , + , + i32); + +define @intrinsic_vamoand_v_nxv16i32_nxv16i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoandei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv16i32.nxv16i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32( + *, + , + , + , + i32); + +define @intrinsic_vamoand_mask_v_nxv16i32_nxv16i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoandei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv1i32.nxv1i16( + *, + , + , + i32); + +define @intrinsic_vamoand_v_nxv1i32_nxv1i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoandei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv1i32.nxv1i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16( + *, + , + , + , + i32); + +define @intrinsic_vamoand_mask_v_nxv1i32_nxv1i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoandei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv2i32.nxv2i16( + *, + , + , + i32); + +define @intrinsic_vamoand_v_nxv2i32_nxv2i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoandei16.v 
v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv2i32.nxv2i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16( + *, + , + , + , + i32); + +define @intrinsic_vamoand_mask_v_nxv2i32_nxv2i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoandei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv4i32.nxv4i16( + *, + , + , + i32); + +define @intrinsic_vamoand_v_nxv4i32_nxv4i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoandei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv4i32.nxv4i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16( + *, + , + , + , + i32); + +define @intrinsic_vamoand_mask_v_nxv4i32_nxv4i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoandei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv8i32.nxv8i16( + *, + , + , + i32); + +define @intrinsic_vamoand_v_nxv8i32_nxv8i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoandei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv8i32.nxv8i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16( + *, + , + , + , + i32); + +define @intrinsic_vamoand_mask_v_nxv8i32_nxv8i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoandei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv16i32.nxv16i16( + *, + , + , + i32); + +define @intrinsic_vamoand_v_nxv16i32_nxv16i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoandei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv16i32.nxv16i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16( + *, + , + , + , + i32); + +define @intrinsic_vamoand_mask_v_nxv16i32_nxv16i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv16i32_nxv16i16: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoandei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv1i32.nxv1i8( + *, + , + , + i32); + +define @intrinsic_vamoand_v_nxv1i32_nxv1i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoandei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv1i32.nxv1i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8( + *, + , + , + , + i32); + +define @intrinsic_vamoand_mask_v_nxv1i32_nxv1i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoandei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv2i32.nxv2i8( + *, + , + , + i32); + +define @intrinsic_vamoand_v_nxv2i32_nxv2i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoandei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv2i32.nxv2i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8( + *, + , + , + , + i32); + +define @intrinsic_vamoand_mask_v_nxv2i32_nxv2i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoandei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv4i32.nxv4i8( + *, + , + , + i32); + +define @intrinsic_vamoand_v_nxv4i32_nxv4i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoandei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv4i32.nxv4i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8( + *, + , + , + , + i32); + +define @intrinsic_vamoand_mask_v_nxv4i32_nxv4i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoandei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv8i32.nxv8i8( + *, + , + , + i32); + +define @intrinsic_vamoand_v_nxv8i32_nxv8i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vamoand_v_nxv8i32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vamoandei8.v v20, (a0), v16, v20
+; CHECK-NEXT:    vmv4r.v v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.nxv8i32.nxv8i8(
+    <vscale x 8 x i32> *%0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i32> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8(
+  <vscale x 8 x i32>*,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vamoand_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vamoandei8.v v20, (a0), v16, v20, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8(
+    <vscale x 8 x i32> *%0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8(
+  <vscale x 16 x i32>*,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i32>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vamoand_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv16i32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vamoandei8.v v8, (a0), v16, v8
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.nxv16i32.nxv16i8(
+    <vscale x 16 x i32> *%0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i32> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8(
+  <vscale x 16 x i32>*,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vamoand_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv16i32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vamoandei8.v v8, (a0), v16, v8, v0.t
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8(
+    <vscale x 16 x i32> *%0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoand-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vamoand-rv64.ll
@@ -0,0 +1,1714 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64(
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i32>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vamoand_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_v_nxv1i32_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vamoandei64.v v17, (a0), v16, v17
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.nxv1i32.nxv1i64(
+    <vscale x 1 x i32> *%0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i32> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64(
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vamoand_mask_v_nxv1i32_nxv1i64(<vscale x 1 x i32> *%0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i32_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vamoandei64.v v17, (a0), v16, v17, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vamoand.mask.nxv1i32.nxv1i64(
+    <vscale x 1 x i32> *%0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamoand.nxv2i32.nxv2i64(
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i32>,
+  i64);
+
+define <vscale x 2 x i32> 
@intrinsic_vamoand_v_nxv2i32_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoandei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv2i32.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv2i32_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoandei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv4i32.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv4i32_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoandei64.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv4i32.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv4i32_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoandei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv8i32.nxv8i64( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv8i32_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamoandei64.v v28, (a0), v16, v28 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv8i32.nxv8i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv8i32_nxv8i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamoandei64.v v28, (a0), v16, v28, v0.t +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv1i64.nxv1i64( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv1i64_nxv1i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoandei64.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vamoand.nxv1i64.nxv1i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv1i64_nxv1i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoandei64.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv2i64.nxv2i64( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv2i64_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoandei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv2i64.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv2i64_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoandei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv4i64.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv4i64_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoandei64.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv4i64.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv4i64_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoandei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv8i64.nxv8i64( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv8i64_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoandei64.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv8i64.nxv8i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv8i64_nxv8i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, 
a2, e64,m8,tu,mu +; CHECK-NEXT: vamoandei64.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv1i32.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv1i32_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoandei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv1i32.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv1i32_nxv1i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoandei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv2i32.nxv2i32( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv2i32_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoandei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv2i32.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv2i32_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoandei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv4i32.nxv4i32( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv4i32_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoandei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv4i32.nxv4i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv4i32_nxv4i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoandei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv8i32.nxv8i32( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv8i32_nxv8i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e32,m4,tu,mu +; CHECK-NEXT: vamoandei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv8i32.nxv8i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv8i32_nxv8i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoandei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv16i32.nxv16i32( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv16i32_nxv16i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoandei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv16i32.nxv16i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv16i32_nxv16i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoandei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv1i64.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv1i64_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoandei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv1i64.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv1i64_nxv1i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoandei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv2i64.nxv2i32( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv2i64_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoandei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv2i64.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32( + *, + , + , + , + i64); + +define 
@intrinsic_vamoand_mask_v_nxv2i64_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoandei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv4i64.nxv4i32( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv4i64_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoandei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv4i64.nxv4i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv4i64_nxv4i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoandei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv8i64.nxv8i32( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv8i64_nxv8i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoandei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv8i64.nxv8i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv8i64_nxv8i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoandei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv1i32.nxv1i16( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv1i32_nxv1i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoandei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv1i32.nxv1i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv1i32.nxv1i16( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv1i32_nxv1i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoandei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vamoand.mask.nxv1i32.nxv1i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv2i32.nxv2i16( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv2i32_nxv2i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoandei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv2i32.nxv2i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv2i32_nxv2i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoandei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv4i32.nxv4i16( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv4i32_nxv4i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoandei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv4i32.nxv4i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv4i32_nxv4i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoandei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv8i32.nxv8i16( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv8i32_nxv8i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoandei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv8i32.nxv8i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv8i32_nxv8i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoandei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv16i32.nxv16i16( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv16i32_nxv16i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoandei16.v v8, (a0), v16, v8 +; CHECK-NEXT: 
vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv16i32.nxv16i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv16i32_nxv16i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoandei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv1i64.nxv1i16( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv1i64_nxv1i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoandei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv1i64.nxv1i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv1i64_nxv1i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoandei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv2i64.nxv2i16( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv2i64_nxv2i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoandei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv2i64.nxv2i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv2i64_nxv2i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoandei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv4i64.nxv4i16( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv4i64_nxv4i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoandei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv4i64.nxv4i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv4i64_nxv4i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, 
a1, e64,m4,tu,mu +; CHECK-NEXT: vamoandei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv8i64.nxv8i16( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv8i64_nxv8i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoandei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv8i64.nxv8i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv8i64_nxv8i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoandei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv8i64.nxv8i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv1i32.nxv1i8( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv1i32_nxv1i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoandei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv1i32.nxv1i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv1i32_nxv1i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoandei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv1i32.nxv1i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv2i32.nxv2i8( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv2i32_nxv2i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoandei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv2i32.nxv2i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv2i32_nxv2i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoandei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv2i32.nxv2i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv4i32.nxv4i8( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv4i32_nxv4i8( *%0, %1, %2, i64 
%3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoandei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv4i32.nxv4i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv4i32_nxv4i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoandei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv4i32.nxv4i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv8i32.nxv8i8( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv8i32_nxv8i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoandei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv8i32.nxv8i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv8i32_nxv8i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoandei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv8i32.nxv8i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv16i32.nxv16i8( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv16i32_nxv16i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoandei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv16i32.nxv16i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv16i32_nxv16i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoandei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv16i32.nxv16i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv1i64.nxv1i8( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv1i64_nxv1i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoandei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv1i64.nxv1i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare 
@llvm.riscv.vamoand.mask.nxv1i64.nxv1i8( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv1i64_nxv1i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoandei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv1i64.nxv1i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv2i64.nxv2i8( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv2i64_nxv2i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoandei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv2i64.nxv2i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv2i64_nxv2i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoandei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv2i64.nxv2i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv4i64.nxv4i8( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv4i64_nxv4i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoandei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv4i64.nxv4i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv4i64_nxv4i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoandei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.mask.nxv4i64.nxv4i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoand.nxv8i64.nxv8i8( + *, + , + , + i64); + +define @intrinsic_vamoand_v_nxv8i64_nxv8i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoand_v_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoandei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoand.nxv8i64.nxv8i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8( + *, + , + , + , + i64); + +define @intrinsic_vamoand_mask_v_nxv8i64_nxv8i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoand_mask_v_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoandei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr 
zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vamoand.mask.nxv8i64.nxv8i8(
+    <vscale x 8 x i64> *%0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamomax-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vamomax-rv32.ll
@@ -0,0 +1,734 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vamomax_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vamomaxei32.v v17, (a0), v16, v17
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> *%0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vamomax_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> *%0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vamomaxei32.v v17, (a0), v16, v17, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> *%0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vamomax_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vamomaxei32.v v17, (a0), v16, v17
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> *%0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vamomax_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> *%0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vamomaxei32.v v17, (a0), v16, v17, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> *%0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>*,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vamomax_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_v_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vamomaxei32.v v18, (a0), v16, v18
+; CHECK-NEXT:    vmv2r.v v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> *%0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>*,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vamomax_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vamomaxei32.v v18, (a0), v16, v18, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> *%0,
%1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv8i32.nxv8i32( + *, + , + , + i32); + +define @intrinsic_vamomax_v_nxv8i32_nxv8i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv8i32.nxv8i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32( + *, + , + , + , + i32); + +define @intrinsic_vamomax_mask_v_nxv8i32_nxv8i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv16i32.nxv16i32( + *, + , + , + i32); + +define @intrinsic_vamomax_v_nxv16i32_nxv16i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv16i32.nxv16i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32( + *, + , + , + , + i32); + +define @intrinsic_vamomax_mask_v_nxv16i32_nxv16i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv1i32.nxv1i16( + *, + , + , + i32); + +define @intrinsic_vamomax_v_nxv1i32_nxv1i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv1i32.nxv1i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16( + *, + , + , + , + i32); + +define @intrinsic_vamomax_mask_v_nxv1i32_nxv1i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv2i32.nxv2i16( + *, + , + , + i32); + +define @intrinsic_vamomax_v_nxv2i32_nxv2i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxei16.v 
v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv2i32.nxv2i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16( + *, + , + , + , + i32); + +define @intrinsic_vamomax_mask_v_nxv2i32_nxv2i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv4i32.nxv4i16( + *, + , + , + i32); + +define @intrinsic_vamomax_v_nxv4i32_nxv4i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv4i32.nxv4i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16( + *, + , + , + , + i32); + +define @intrinsic_vamomax_mask_v_nxv4i32_nxv4i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv8i32.nxv8i16( + *, + , + , + i32); + +define @intrinsic_vamomax_v_nxv8i32_nxv8i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv8i32.nxv8i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16( + *, + , + , + , + i32); + +define @intrinsic_vamomax_mask_v_nxv8i32_nxv8i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv16i32.nxv16i16( + *, + , + , + i32); + +define @intrinsic_vamomax_v_nxv16i32_nxv16i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv16i32.nxv16i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16( + *, + , + , + , + i32); + +define @intrinsic_vamomax_mask_v_nxv16i32_nxv16i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv16i32_nxv16i16: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv1i32.nxv1i8( + *, + , + , + i32); + +define @intrinsic_vamomax_v_nxv1i32_nxv1i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv1i32.nxv1i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8( + *, + , + , + , + i32); + +define @intrinsic_vamomax_mask_v_nxv1i32_nxv1i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv2i32.nxv2i8( + *, + , + , + i32); + +define @intrinsic_vamomax_v_nxv2i32_nxv2i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv2i32.nxv2i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8( + *, + , + , + , + i32); + +define @intrinsic_vamomax_mask_v_nxv2i32_nxv2i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv4i32.nxv4i8( + *, + , + , + i32); + +define @intrinsic_vamomax_v_nxv4i32_nxv4i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv4i32.nxv4i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8( + *, + , + , + , + i32); + +define @intrinsic_vamomax_mask_v_nxv4i32_nxv4i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv8i32.nxv8i8( + *, + , + , + i32); + +define @intrinsic_vamomax_v_nxv8i32_nxv8i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vamomax_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv8i32.nxv8i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8( + *, + , + , + , + i32); + +define @intrinsic_vamomax_mask_v_nxv8i32_nxv8i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv16i32.nxv16i8( + *, + , + , + i32); + +define @intrinsic_vamomax_v_nxv16i32_nxv16i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv16i32.nxv16i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8( + *, + , + , + , + i32); + +define @intrinsic_vamomax_mask_v_nxv16i32_nxv16i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamomax-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vamomax-rv64.ll @@ -0,0 +1,1714 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vamomax.nxv1i32.nxv1i64( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv1i32_nxv1i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxei64.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv1i32.nxv1i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv1i32_nxv1i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxei64.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv2i32.nxv2i64( + *, + , + , + i64); + +define 
@intrinsic_vamomax_v_nxv2i32_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv2i32.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv2i32_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv4i32.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv4i32_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxei64.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv4i32.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv4i32_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv8i32.nxv8i64( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv8i32_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamomaxei64.v v28, (a0), v16, v28 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv8i32.nxv8i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv8i32_nxv8i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamomaxei64.v v28, (a0), v16, v28, v0.t +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv1i64.nxv1i64( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv1i64_nxv1i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamomaxei64.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vamomax.nxv1i64.nxv1i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv1i64_nxv1i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamomaxei64.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv2i64.nxv2i64( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv2i64_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamomaxei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv2i64.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv2i64_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamomaxei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv4i64.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv4i64_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamomaxei64.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv4i64.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv4i64_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamomaxei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv8i64.nxv8i64( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv8i64_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamomaxei64.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv8i64.nxv8i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv8i64_nxv8i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, 
a2, e64,m8,tu,mu +; CHECK-NEXT: vamomaxei64.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv1i32.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv1i32_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv1i32.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv1i32_nxv1i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv2i32.nxv2i32( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv2i32_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv2i32.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv2i32_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv4i32.nxv4i32( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv4i32_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv4i32.nxv4i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv4i32_nxv4i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv8i32.nxv8i32( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv8i32_nxv8i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e32,m4,tu,mu +; CHECK-NEXT: vamomaxei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv8i32.nxv8i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv8i32_nxv8i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv16i32.nxv16i32( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv16i32_nxv16i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv16i32.nxv16i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv16i32_nxv16i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv1i64.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv1i64_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamomaxei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv1i64.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv1i64_nxv1i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamomaxei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv2i64.nxv2i32( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv2i64_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamomaxei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv2i64.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32( + *, + , + , + , + i64); + +define 
@intrinsic_vamomax_mask_v_nxv2i64_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamomaxei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv4i64.nxv4i32( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv4i64_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamomaxei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv4i64.nxv4i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv4i64_nxv4i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamomaxei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv8i64.nxv8i32( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv8i64_nxv8i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamomaxei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv8i64.nxv8i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv8i64_nxv8i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamomaxei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv1i32.nxv1i16( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv1i32_nxv1i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv1i32.nxv1i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv1i32.nxv1i16( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv1i32_nxv1i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vamomax.mask.nxv1i32.nxv1i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv2i32.nxv2i16( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv2i32_nxv2i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv2i32.nxv2i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv2i32_nxv2i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv4i32.nxv4i16( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv4i32_nxv4i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv4i32.nxv4i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv4i32_nxv4i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv8i32.nxv8i16( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv8i32_nxv8i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv8i32.nxv8i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv8i32_nxv8i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv16i32.nxv16i16( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv16i32_nxv16i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxei16.v v8, (a0), v16, v8 +; CHECK-NEXT: 
vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv16i32.nxv16i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv16i32_nxv16i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv1i64.nxv1i16( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv1i64_nxv1i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamomaxei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv1i64.nxv1i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv1i64_nxv1i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamomaxei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv2i64.nxv2i16( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv2i64_nxv2i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamomaxei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv2i64.nxv2i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv2i64_nxv2i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamomaxei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv4i64.nxv4i16( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv4i64_nxv4i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamomaxei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv4i64.nxv4i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv4i64_nxv4i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, 
a1, e64,m4,tu,mu +; CHECK-NEXT: vamomaxei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv8i64.nxv8i16( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv8i64_nxv8i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamomaxei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv8i64.nxv8i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv8i64_nxv8i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamomaxei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv1i32.nxv1i8( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv1i32_nxv1i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv1i32.nxv1i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv1i32_nxv1i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv1i32.nxv1i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv2i32.nxv2i8( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv2i32_nxv2i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv2i32.nxv2i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv2i32_nxv2i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv2i32.nxv2i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv4i32.nxv4i8( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv4i32_nxv4i8( *%0, %1, %2, i64 
%3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv4i32.nxv4i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv4i32_nxv4i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv4i32.nxv4i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv8i32.nxv8i8( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv8i32_nxv8i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv8i32.nxv8i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv8i32_nxv8i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv8i32.nxv8i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv16i32.nxv16i8( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv16i32_nxv16i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv16i32.nxv16i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv16i32_nxv16i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv16i32.nxv16i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv1i64.nxv1i8( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv1i64_nxv1i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamomaxei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv1i64.nxv1i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare 
@llvm.riscv.vamomax.mask.nxv1i64.nxv1i8( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv1i64_nxv1i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamomaxei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv1i64.nxv1i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv2i64.nxv2i8( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv2i64_nxv2i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamomaxei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv2i64.nxv2i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv2i64_nxv2i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamomaxei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv2i64.nxv2i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv4i64.nxv4i8( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv4i64_nxv4i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamomaxei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv4i64.nxv4i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv4i64_nxv4i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamomaxei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv4i64.nxv4i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomax.nxv8i64.nxv8i8( + *, + , + , + i64); + +define @intrinsic_vamomax_v_nxv8i64_nxv8i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomax_v_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamomaxei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.nxv8i64.nxv8i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8( + *, + , + , + , + i64); + +define @intrinsic_vamomax_mask_v_nxv8i64_nxv8i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomax_mask_v_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamomaxei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr 
zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomax.mask.nxv8i64.nxv8i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv32.ll @@ -0,0 +1,734 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vamomaxu.nxv1i32.nxv1i16( + *, + , + , + i32); + +define @intrinsic_vamomaxu_v_nxv1i32_nxv1i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxuei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16( + *, + , + , + , + i32); + +define @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxuei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv2i32.nxv2i16( + *, + , + , + i32); + +define @intrinsic_vamomaxu_v_nxv2i32_nxv2i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxuei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16( + *, + , + , + , + i32); + +define @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxuei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv4i32.nxv4i16( + *, + , + , + i32); + +define @intrinsic_vamomaxu_v_nxv4i32_nxv4i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxuei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16( + *, + , + , + , + i32); + +define @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxuei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv8i32.nxv8i16( + *, + , + , + i32); + +define @intrinsic_vamomaxu_v_nxv8i32_nxv8i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxuei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16( + *, + , + , + , + i32); + +define @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxuei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv16i32.nxv16i16( + *, + , + , + i32); + +define @intrinsic_vamomaxu_v_nxv16i32_nxv16i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxuei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16( + *, + , + , + , + i32); + +define @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxuei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv1i32.nxv1i8( + *, + , + , + i32); + +define @intrinsic_vamomaxu_v_nxv1i32_nxv1i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxuei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8( + *, + , + , + , + i32); + +define @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxuei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv2i32.nxv2i8( + *, + , + , + i32); + +define @intrinsic_vamomaxu_v_nxv2i32_nxv2i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxuei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8( + *, + , + , + , + i32); + +define @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxuei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv4i32.nxv4i8( + *, + , + , + i32); + +define @intrinsic_vamomaxu_v_nxv4i32_nxv4i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxuei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8( + *, + , + , + , + i32); + +define @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxuei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv8i32.nxv8i8( + *, + , + , + i32); + +define @intrinsic_vamomaxu_v_nxv8i32_nxv8i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxuei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8( + *, + , + , + , + i32); + +define @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxuei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv16i32.nxv16i8( + *, + , + , + i32); + +define @intrinsic_vamomaxu_v_nxv16i32_nxv16i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxuei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8( + *, + , + , + , + i32); + +define @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i8( *%0, %1, %2, %3, i32 %4) nounwind { +; 
CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxuei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv1i32.nxv1i32( + *, + , + , + i32); + +define @intrinsic_vamomaxu_v_nxv1i32_nxv1i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxuei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32( + *, + , + , + , + i32); + +define @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxuei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv2i32.nxv2i32( + *, + , + , + i32); + +define @intrinsic_vamomaxu_v_nxv2i32_nxv2i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxuei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32( + *, + , + , + , + i32); + +define @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxuei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv4i32.nxv4i32( + *, + , + , + i32); + +define @intrinsic_vamomaxu_v_nxv4i32_nxv4i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxuei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32( + *, + , + , + , + i32); + +define @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxuei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare 
@llvm.riscv.vamomaxu.nxv8i32.nxv8i32( + *, + , + , + i32); + +define @intrinsic_vamomaxu_v_nxv8i32_nxv8i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxuei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32( + *, + , + , + , + i32); + +define @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxuei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv16i32.nxv16i32( + *, + , + , + i32); + +define @intrinsic_vamomaxu_v_nxv16i32_nxv16i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxuei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32( + *, + , + , + , + i32); + +define @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxuei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vamomaxu-rv64.ll @@ -0,0 +1,1714 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vamomaxu.nxv1i32.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv1i32_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxuei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxuei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) 
+entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv2i32.nxv2i32( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv2i32_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxuei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxuei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv4i32.nxv4i32( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv4i32_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxuei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxuei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv8i32.nxv8i32( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv8i32_nxv8i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxuei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxuei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv16i32.nxv16i32( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv16i32_nxv16i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; 
CHECK-NEXT: vamomaxuei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxuei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv1i64.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv1i64_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamomaxuei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamomaxuei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv2i64.nxv2i32( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv2i64_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamomaxuei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamomaxuei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv4i64.nxv4i32( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv4i64_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamomaxuei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamomaxuei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv8i64.nxv8i32( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv8i64_nxv8i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamomaxuei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamomaxuei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv1i32.nxv1i16( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv1i32_nxv1i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxuei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxuei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv2i32.nxv2i16( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv2i32_nxv2i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxuei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxuei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i16( + *%0, + %1, + %2, + %3, + i64 
%4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv4i32.nxv4i16( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv4i32_nxv4i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxuei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxuei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv8i32.nxv8i16( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv8i32_nxv8i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxuei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxuei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv16i32.nxv16i16( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv16i32_nxv16i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxuei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxuei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv1i64.nxv1i16( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv1i64_nxv1i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: 
vamomaxuei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamomaxuei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv2i64.nxv2i16( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv2i64_nxv2i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamomaxuei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamomaxuei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv4i64.nxv4i16( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv4i64_nxv4i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamomaxuei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamomaxuei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv8i64.nxv8i16( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv8i64_nxv8i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamomaxuei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamomaxuei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv1i32.nxv1i8( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv1i32_nxv1i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxuei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxuei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv2i32.nxv2i8( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv2i32_nxv2i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxuei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxuei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv4i32.nxv4i8( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv4i32_nxv4i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxuei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxuei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv8i32.nxv8i8( + *, + , + , + i64); + +define 
@intrinsic_vamomaxu_v_nxv8i32_nxv8i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxuei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamomaxuei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv16i32.nxv16i8( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv16i32_nxv16i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxuei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv16i32.nxv16i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamomaxuei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv16i32.nxv16i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv1i64.nxv1i8( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv1i64_nxv1i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamomaxuei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamomaxuei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv2i64.nxv2i8( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv2i64_nxv2i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamomaxuei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vamomaxu.nxv2i64.nxv2i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamomaxuei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv4i64.nxv4i8( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv4i64_nxv4i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamomaxuei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamomaxuei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv8i64.nxv8i8( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv8i64_nxv8i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamomaxuei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamomaxuei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv1i32.nxv1i64( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv1i32_nxv1i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxuei64.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv1i32.nxv1i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamomaxuei64.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv1i32.nxv1i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv2i32.nxv2i64( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv2i32_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxuei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv2i32.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamomaxuei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv2i32.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv4i32.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv4i32_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxuei64.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv4i32.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamomaxuei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv4i32.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv8i32.nxv8i64( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv8i32_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamomaxuei64.v v28, (a0), v16, v28 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv8i32.nxv8i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamomaxuei64.v v28, (a0), v16, v28, v0.t +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv8i32.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv1i64.nxv1i64( + *, + , + , 
+ i64); + +define @intrinsic_vamomaxu_v_nxv1i64_nxv1i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamomaxuei64.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv1i64.nxv1i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamomaxuei64.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv1i64.nxv1i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv2i64.nxv2i64( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv2i64_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamomaxuei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv2i64.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamomaxuei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv2i64.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv4i64.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv4i64_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamomaxuei64.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv4i64.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamomaxuei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv4i64.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomaxu.nxv8i64.nxv8i64( + *, + , + , + i64); + +define @intrinsic_vamomaxu_v_nxv8i64_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamomaxuei64.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.nxv8i64.nxv8i64( + *%0, + %1, + %2, + 
i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomaxu_mask_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamomaxuei64.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomaxu.mask.nxv8i64.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamomin-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vamomin-rv32.ll @@ -0,0 +1,734 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vamomin.nxv1i32.nxv1i32( + *, + , + , + i32); + +define @intrinsic_vamomin_v_nxv1i32_nxv1i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv1i32.nxv1i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32( + *, + , + , + , + i32); + +define @intrinsic_vamomin_mask_v_nxv1i32_nxv1i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv2i32.nxv2i32( + *, + , + , + i32); + +define @intrinsic_vamomin_v_nxv2i32_nxv2i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv2i32.nxv2i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32( + *, + , + , + , + i32); + +define @intrinsic_vamomin_mask_v_nxv2i32_nxv2i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv4i32.nxv4i32( + *, + , + , + i32); + +define @intrinsic_vamomin_v_nxv4i32_nxv4i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv4i32.nxv4i32( + *%0, + %1, + %2, + 
i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32( + *, + , + , + , + i32); + +define @intrinsic_vamomin_mask_v_nxv4i32_nxv4i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv8i32.nxv8i32( + *, + , + , + i32); + +define @intrinsic_vamomin_v_nxv8i32_nxv8i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv8i32.nxv8i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32( + *, + , + , + , + i32); + +define @intrinsic_vamomin_mask_v_nxv8i32_nxv8i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv16i32.nxv16i32( + *, + , + , + i32); + +define @intrinsic_vamomin_v_nxv16i32_nxv16i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv16i32.nxv16i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32( + *, + , + , + , + i32); + +define @intrinsic_vamomin_mask_v_nxv16i32_nxv16i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv1i32.nxv1i16( + *, + , + , + i32); + +define @intrinsic_vamomin_v_nxv1i32_nxv1i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv1i32.nxv1i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16( + *, + , + , + , + i32); + +define @intrinsic_vamomin_mask_v_nxv1i32_nxv1i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: 
vamominei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv2i32.nxv2i16( + *, + , + , + i32); + +define @intrinsic_vamomin_v_nxv2i32_nxv2i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv2i32.nxv2i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16( + *, + , + , + , + i32); + +define @intrinsic_vamomin_mask_v_nxv2i32_nxv2i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv4i32.nxv4i16( + *, + , + , + i32); + +define @intrinsic_vamomin_v_nxv4i32_nxv4i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv4i32.nxv4i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16( + *, + , + , + , + i32); + +define @intrinsic_vamomin_mask_v_nxv4i32_nxv4i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv8i32.nxv8i16( + *, + , + , + i32); + +define @intrinsic_vamomin_v_nxv8i32_nxv8i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv8i32.nxv8i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16( + *, + , + , + , + i32); + +define @intrinsic_vamomin_mask_v_nxv8i32_nxv8i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv16i32.nxv16i16( + *, + , + , + i32); + +define @intrinsic_vamomin_v_nxv16i32_nxv16i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: 
vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv16i32.nxv16i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16( + *, + , + , + , + i32); + +define @intrinsic_vamomin_mask_v_nxv16i32_nxv16i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv1i32.nxv1i8( + *, + , + , + i32); + +define @intrinsic_vamomin_v_nxv1i32_nxv1i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv1i32.nxv1i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8( + *, + , + , + , + i32); + +define @intrinsic_vamomin_mask_v_nxv1i32_nxv1i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv2i32.nxv2i8( + *, + , + , + i32); + +define @intrinsic_vamomin_v_nxv2i32_nxv2i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv2i32.nxv2i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8( + *, + , + , + , + i32); + +define @intrinsic_vamomin_mask_v_nxv2i32_nxv2i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv4i32.nxv4i8( + *, + , + , + i32); + +define @intrinsic_vamomin_v_nxv4i32_nxv4i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv4i32.nxv4i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8( + *, + , + , + , + i32); + +define @intrinsic_vamomin_mask_v_nxv4i32_nxv4i8( *%0, %1, %2, %3, i32 %4) nounwind { +; 
CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv8i32.nxv8i8( + *, + , + , + i32); + +define @intrinsic_vamomin_v_nxv8i32_nxv8i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv8i32.nxv8i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8( + *, + , + , + , + i32); + +define @intrinsic_vamomin_mask_v_nxv8i32_nxv8i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv16i32.nxv16i8( + *, + , + , + i32); + +define @intrinsic_vamomin_v_nxv16i32_nxv16i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv16i32.nxv16i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8( + *, + , + , + , + i32); + +define @intrinsic_vamomin_mask_v_nxv16i32_nxv16i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vamomin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamomin-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vamomin-rv64.ll @@ -0,0 +1,1714 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vamomin.nxv1i32.nxv1i64( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv1i32_nxv1i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominei64.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv1i32.nxv1i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64( + *, + , + , + , + i64); + +define 
@intrinsic_vamomin_mask_v_nxv1i32_nxv1i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominei64.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv2i32.nxv2i64( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv2i32_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv2i32.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv2i32_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv4i32.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv4i32_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominei64.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv4i32.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv4i32_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv8i32.nxv8i64( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv8i32_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamominei64.v v28, (a0), v16, v28 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv8i32.nxv8i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv8i32.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv8i32_nxv8i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamominei64.v v28, (a0), v16, v28, v0.t +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vamomin.mask.nxv8i32.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv1i64.nxv1i64( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv1i64_nxv1i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamominei64.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv1i64.nxv1i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv1i64_nxv1i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamominei64.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv2i64.nxv2i64( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv2i64_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamominei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv2i64.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv2i64_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamominei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv4i64.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv4i64_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamominei64.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv4i64.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv4i64_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamominei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv8i64.nxv8i64( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv8i64_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamominei64.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v 
v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv8i64.nxv8i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv8i64_nxv8i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamominei64.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv1i32.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv1i32_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv1i32.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv1i32_nxv1i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv2i32.nxv2i32( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv2i32_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv2i32.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv2i32_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv4i32.nxv4i32( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv4i32_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv4i32.nxv4i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv4i32_nxv4i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e32,m2,tu,mu +; CHECK-NEXT: vamominei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv8i32.nxv8i32( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv8i32_nxv8i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv8i32.nxv8i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv8i32_nxv8i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv16i32.nxv16i32( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv16i32_nxv16i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv16i32.nxv16i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv16i32_nxv16i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv1i64.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv1i64_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamominei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv1i64.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv1i64_nxv1i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamominei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv2i64.nxv2i32( + *, + , + , + i64); + +define 
@intrinsic_vamomin_v_nxv2i64_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamominei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv2i64.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv2i64_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamominei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv4i64.nxv4i32( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv4i64_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamominei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv4i64.nxv4i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv4i64_nxv4i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamominei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv8i64.nxv8i32( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv8i64_nxv8i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamominei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv8i64.nxv8i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv8i64_nxv8i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamominei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv1i32.nxv1i16( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv1i32_nxv1i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vamomin.nxv1i32.nxv1i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv1i32_nxv1i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv2i32.nxv2i16( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv2i32_nxv2i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv2i32.nxv2i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv2i32_nxv2i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv4i32.nxv4i16( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv4i32_nxv4i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv4i32.nxv4i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv4i32_nxv4i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv8i32.nxv8i16( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv8i32_nxv8i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv8i32.nxv8i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv8i32_nxv8i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = 
call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv16i32.nxv16i16( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv16i32_nxv16i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv16i32.nxv16i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv16i32_nxv16i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv1i64.nxv1i16( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv1i64_nxv1i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamominei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv1i64.nxv1i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv1i64_nxv1i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamominei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv2i64.nxv2i16( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv2i64_nxv2i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamominei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv2i64.nxv2i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv2i64_nxv2i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamominei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv4i64.nxv4i16( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv4i64_nxv4i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamominei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv4i64.nxv4i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv4i64_nxv4i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamominei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv8i64.nxv8i16( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv8i64_nxv8i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamominei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv8i64.nxv8i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv8i64_nxv8i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamominei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv1i32.nxv1i8( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv1i32_nxv1i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv1i32.nxv1i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv1i32_nxv1i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv1i32.nxv1i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv2i32.nxv2i8( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv2i32_nxv2i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv2i32.nxv2i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv2i32_nxv2i8( *%0, 
%1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv2i32.nxv2i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv4i32.nxv4i8( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv4i32_nxv4i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv4i32.nxv4i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv4i32_nxv4i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv4i32.nxv4i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv8i32.nxv8i8( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv8i32_nxv8i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv8i32.nxv8i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv8i32_nxv8i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv8i32.nxv8i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv16i32.nxv16i8( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv16i32_nxv16i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv16i32.nxv16i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv16i32_nxv16i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv16i32.nxv16i8( + *%0, + %1, + %2, + %3, + i64 %4) + 
+ ret %a +} + +declare @llvm.riscv.vamomin.nxv1i64.nxv1i8( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv1i64_nxv1i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamominei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv1i64.nxv1i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv1i64_nxv1i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamominei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv1i64.nxv1i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv2i64.nxv2i8( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv2i64_nxv2i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamominei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv2i64.nxv2i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv2i64_nxv2i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamominei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv2i64.nxv2i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv4i64.nxv4i8( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv4i64_nxv4i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamominei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv4i64.nxv4i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv4i64_nxv4i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamominei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv4i64.nxv4i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamomin.nxv8i64.nxv8i8( + *, + , + , + i64); + +define @intrinsic_vamomin_v_nxv8i64_nxv8i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamomin_v_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamominei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.nxv8i64.nxv8i8( + *%0, + %1, + 
%2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8( + *, + , + , + , + i64); + +define @intrinsic_vamomin_mask_v_nxv8i64_nxv8i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamomin_mask_v_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamominei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamomin.mask.nxv8i64.nxv8i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vamominu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamominu-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vamominu-rv32.ll @@ -0,0 +1,734 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vamominu.nxv1i32.nxv1i32( + *, + , + , + i32); + +define @intrinsic_vamominu_v_nxv1i32_nxv1i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominuei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv1i32.nxv1i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32( + *, + , + , + , + i32); + +define @intrinsic_vamominu_mask_v_nxv1i32_nxv1i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominuei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv2i32.nxv2i32( + *, + , + , + i32); + +define @intrinsic_vamominu_v_nxv2i32_nxv2i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominuei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv2i32.nxv2i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32( + *, + , + , + , + i32); + +define @intrinsic_vamominu_mask_v_nxv2i32_nxv2i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominuei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv4i32.nxv4i32( + *, + , + , + i32); + +define @intrinsic_vamominu_v_nxv4i32_nxv4i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominuei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vamominu.nxv4i32.nxv4i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32( + *, + , + , + , + i32); + +define @intrinsic_vamominu_mask_v_nxv4i32_nxv4i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominuei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv8i32.nxv8i32( + *, + , + , + i32); + +define @intrinsic_vamominu_v_nxv8i32_nxv8i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominuei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv8i32.nxv8i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32( + *, + , + , + , + i32); + +define @intrinsic_vamominu_mask_v_nxv8i32_nxv8i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominuei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv16i32.nxv16i32( + *, + , + , + i32); + +define @intrinsic_vamominu_v_nxv16i32_nxv16i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominuei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv16i32.nxv16i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32( + *, + , + , + , + i32); + +define @intrinsic_vamominu_mask_v_nxv16i32_nxv16i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominuei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv1i32.nxv1i16( + *, + , + , + i32); + +define @intrinsic_vamominu_v_nxv1i32_nxv1i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominuei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv1i32.nxv1i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16( + *, + , + , + , + i32); + +define @intrinsic_vamominu_mask_v_nxv1i32_nxv1i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i32_nxv1i16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominuei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv2i32.nxv2i16( + *, + , + , + i32); + +define @intrinsic_vamominu_v_nxv2i32_nxv2i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominuei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv2i32.nxv2i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16( + *, + , + , + , + i32); + +define @intrinsic_vamominu_mask_v_nxv2i32_nxv2i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominuei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv4i32.nxv4i16( + *, + , + , + i32); + +define @intrinsic_vamominu_v_nxv4i32_nxv4i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominuei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv4i32.nxv4i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16( + *, + , + , + , + i32); + +define @intrinsic_vamominu_mask_v_nxv4i32_nxv4i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominuei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv8i32.nxv8i16( + *, + , + , + i32); + +define @intrinsic_vamominu_v_nxv8i32_nxv8i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominuei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv8i32.nxv8i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16( + *, + , + , + , + i32); + +define @intrinsic_vamominu_mask_v_nxv8i32_nxv8i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominuei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv16i32.nxv16i16( + *, + , + , + i32); + +define @intrinsic_vamominu_v_nxv16i32_nxv16i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vamominu_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominuei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv16i32.nxv16i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16( + *, + , + , + , + i32); + +define @intrinsic_vamominu_mask_v_nxv16i32_nxv16i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominuei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv1i32.nxv1i8( + *, + , + , + i32); + +define @intrinsic_vamominu_v_nxv1i32_nxv1i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominuei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv1i32.nxv1i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8( + *, + , + , + , + i32); + +define @intrinsic_vamominu_mask_v_nxv1i32_nxv1i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominuei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv2i32.nxv2i8( + *, + , + , + i32); + +define @intrinsic_vamominu_v_nxv2i32_nxv2i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominuei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv2i32.nxv2i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8( + *, + , + , + , + i32); + +define @intrinsic_vamominu_mask_v_nxv2i32_nxv2i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominuei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv4i32.nxv4i8( + *, + , + , + i32); + +define @intrinsic_vamominu_v_nxv4i32_nxv4i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominuei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv4i32.nxv4i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare 
@llvm.riscv.vamominu.mask.nxv4i32.nxv4i8( + *, + , + , + , + i32); + +define @intrinsic_vamominu_mask_v_nxv4i32_nxv4i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominuei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv8i32.nxv8i8( + *, + , + , + i32); + +define @intrinsic_vamominu_v_nxv8i32_nxv8i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominuei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv8i32.nxv8i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8( + *, + , + , + , + i32); + +define @intrinsic_vamominu_mask_v_nxv8i32_nxv8i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominuei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv16i32.nxv16i8( + *, + , + , + i32); + +define @intrinsic_vamominu_v_nxv16i32_nxv16i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominuei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv16i32.nxv16i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8( + *, + , + , + , + i32); + +define @intrinsic_vamominu_mask_v_nxv16i32_nxv16i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominuei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vamominu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamominu-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vamominu-rv64.ll @@ -0,0 +1,1714 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vamominu.nxv1i32.nxv1i64( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv1i32_nxv1i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominuei64.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vamominu.nxv1i32.nxv1i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv1i32_nxv1i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominuei64.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv2i32.nxv2i64( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv2i32_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominuei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv2i32.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv2i32_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominuei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv4i32.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv4i32_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominuei64.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv4i32.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv4i32_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominuei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv8i32.nxv8i64( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv8i32_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamominuei64.v v28, (a0), v16, v28 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv8i32.nxv8i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv8i32_nxv8i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: 
vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamominuei64.v v28, (a0), v16, v28, v0.t +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv1i64.nxv1i64( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv1i64_nxv1i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamominuei64.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv1i64.nxv1i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv1i64_nxv1i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamominuei64.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv2i64.nxv2i64( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv2i64_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamominuei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv2i64.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv2i64_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamominuei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv4i64.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv4i64_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamominuei64.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv4i64.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv4i64_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamominuei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv8i64.nxv8i64( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv8i64_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vamominu_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamominuei64.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv8i64.nxv8i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv8i64_nxv8i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamominuei64.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv1i32.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv1i32_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominuei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv1i32.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv1i32_nxv1i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominuei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv2i32.nxv2i32( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv2i32_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominuei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv2i32.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv2i32_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominuei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv4i32.nxv4i32( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv4i32_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominuei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv4i32.nxv4i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + 
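; For reference, the fully typed shape of the unmasked/masked vamominu
; declarations in this file, using the nxv4i32/nxv4i32 variant as an example.
; The scalable-vector types here are inferred from the intrinsic-name suffixes
; and the (base, index, value, vl) operand order; this is an illustrative
; sketch, not a verbatim excerpt of the generated test:
;
;   declare <vscale x 4 x i32> @llvm.riscv.vamominu.nxv4i32.nxv4i32(
;     <vscale x 4 x i32>*,   ; base address
;     <vscale x 4 x i32>,    ; index vector
;     <vscale x 4 x i32>,    ; value operand (matches the result type)
;     i64);                  ; vl
;
;   declare <vscale x 4 x i32> @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32(
;     <vscale x 4 x i32>*,
;     <vscale x 4 x i32>,
;     <vscale x 4 x i32>,
;     <vscale x 4 x i1>,     ; mask
;     i64);                  ; vl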
+declare @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv4i32_nxv4i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominuei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv8i32.nxv8i32( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv8i32_nxv8i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominuei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv8i32.nxv8i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv8i32_nxv8i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominuei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv16i32.nxv16i32( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv16i32_nxv16i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominuei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv16i32.nxv16i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv16i32_nxv16i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominuei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv1i64.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv1i64_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamominuei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv1i64.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv1i64_nxv1i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: 
vamominuei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv2i64.nxv2i32( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv2i64_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamominuei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv2i64.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv2i64_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamominuei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv4i64.nxv4i32( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv4i64_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamominuei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv4i64.nxv4i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv4i64_nxv4i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamominuei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv8i64.nxv8i32( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv8i64_nxv8i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamominuei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv8i64.nxv8i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv8i64_nxv8i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamominuei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv1i32.nxv1i16( + *, + , + , + i64); + +define 
@intrinsic_vamominu_v_nxv1i32_nxv1i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominuei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv1i32.nxv1i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv1i32_nxv1i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominuei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv2i32.nxv2i16( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv2i32_nxv2i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominuei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv2i32.nxv2i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv2i32_nxv2i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominuei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv4i32.nxv4i16( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv4i32_nxv4i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominuei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv4i32.nxv4i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv4i32_nxv4i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominuei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv8i32.nxv8i16( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv8i32_nxv8i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominuei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv8i32.nxv8i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16( + *, + , + , + 
, + i64); + +define @intrinsic_vamominu_mask_v_nxv8i32_nxv8i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominuei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv16i32.nxv16i16( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv16i32_nxv16i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominuei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv16i32.nxv16i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv16i32_nxv16i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominuei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv1i64.nxv1i16( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv1i64_nxv1i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamominuei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv1i64.nxv1i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv1i64_nxv1i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamominuei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv2i64.nxv2i16( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv2i64_nxv2i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamominuei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv2i64.nxv2i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv2i64_nxv2i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamominuei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, 
v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv4i64.nxv4i16( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv4i64_nxv4i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamominuei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv4i64.nxv4i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv4i64_nxv4i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamominuei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv8i64.nxv8i16( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv8i64_nxv8i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamominuei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv8i64.nxv8i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv8i64_nxv8i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamominuei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv1i32.nxv1i8( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv1i32_nxv1i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominuei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv1i32.nxv1i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv1i32_nxv1i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamominuei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv1i32.nxv1i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv2i32.nxv2i8( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv2i32_nxv2i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vamominu_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominuei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv2i32.nxv2i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv2i32_nxv2i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamominuei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv2i32.nxv2i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv4i32.nxv4i8( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv4i32_nxv4i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominuei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv4i32.nxv4i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv4i32_nxv4i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamominuei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv4i32.nxv4i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv8i32.nxv8i8( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv8i32_nxv8i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominuei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv8i32.nxv8i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv8i32_nxv8i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamominuei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv8i32.nxv8i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv16i32.nxv16i8( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv16i32_nxv16i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominuei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv16i32.nxv16i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8( + *, + , + , + , + i64); + +define 
@intrinsic_vamominu_mask_v_nxv16i32_nxv16i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamominuei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv16i32.nxv16i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv1i64.nxv1i8( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv1i64_nxv1i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamominuei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv1i64.nxv1i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv1i64_nxv1i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamominuei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv1i64.nxv1i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv2i64.nxv2i8( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv2i64_nxv2i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamominuei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv2i64.nxv2i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv2i64_nxv2i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamominuei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv2i64.nxv2i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamominu.nxv4i64.nxv4i8( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv4i64_nxv4i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamominuei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv4i64.nxv4i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv4i64_nxv4i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamominuei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv4i64.nxv4i8( + *%0, + %1, + %2, + %3, + i64 %4) + + 
ret %a +} + +declare @llvm.riscv.vamominu.nxv8i64.nxv8i8( + *, + , + , + i64); + +define @intrinsic_vamominu_v_nxv8i64_nxv8i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamominu_v_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamominuei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.nxv8i64.nxv8i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8( + *, + , + , + , + i64); + +define @intrinsic_vamominu_mask_v_nxv8i64_nxv8i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamominu_mask_v_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamominuei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamominu.mask.nxv8i64.nxv8i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoor-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vamoor-rv32.ll @@ -0,0 +1,734 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vamoor.nxv1i32.nxv1i32( + *, + , + , + i32); + +define @intrinsic_vamoor_v_nxv1i32_nxv1i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoorei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv1i32.nxv1i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32( + *, + , + , + , + i32); + +define @intrinsic_vamoor_mask_v_nxv1i32_nxv1i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoorei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv2i32.nxv2i32( + *, + , + , + i32); + +define @intrinsic_vamoor_v_nxv2i32_nxv2i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoorei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv2i32.nxv2i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32( + *, + , + , + , + i32); + +define @intrinsic_vamoor_mask_v_nxv2i32_nxv2i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoorei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vamoor.mask.nxv2i32.nxv2i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv4i32.nxv4i32( + *, + , + , + i32); + +define @intrinsic_vamoor_v_nxv4i32_nxv4i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoorei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv4i32.nxv4i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32( + *, + , + , + , + i32); + +define @intrinsic_vamoor_mask_v_nxv4i32_nxv4i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoorei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv8i32.nxv8i32( + *, + , + , + i32); + +define @intrinsic_vamoor_v_nxv8i32_nxv8i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoorei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv8i32.nxv8i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32( + *, + , + , + , + i32); + +define @intrinsic_vamoor_mask_v_nxv8i32_nxv8i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoorei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv16i32.nxv16i32( + *, + , + , + i32); + +define @intrinsic_vamoor_v_nxv16i32_nxv16i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoorei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv16i32.nxv16i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32( + *, + , + , + , + i32); + +define @intrinsic_vamoor_mask_v_nxv16i32_nxv16i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoorei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv1i32.nxv1i16( + *, + , + , + i32); + +define @intrinsic_vamoor_v_nxv1i32_nxv1i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: 
vamoorei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv1i32.nxv1i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16( + *, + , + , + , + i32); + +define @intrinsic_vamoor_mask_v_nxv1i32_nxv1i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoorei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv2i32.nxv2i16( + *, + , + , + i32); + +define @intrinsic_vamoor_v_nxv2i32_nxv2i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoorei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv2i32.nxv2i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16( + *, + , + , + , + i32); + +define @intrinsic_vamoor_mask_v_nxv2i32_nxv2i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoorei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv4i32.nxv4i16( + *, + , + , + i32); + +define @intrinsic_vamoor_v_nxv4i32_nxv4i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoorei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv4i32.nxv4i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16( + *, + , + , + , + i32); + +define @intrinsic_vamoor_mask_v_nxv4i32_nxv4i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoorei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv8i32.nxv8i16( + *, + , + , + i32); + +define @intrinsic_vamoor_v_nxv8i32_nxv8i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoorei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv8i32.nxv8i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16( + *, + , + , + , + i32); + +define @intrinsic_vamoor_mask_v_nxv8i32_nxv8i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoorei16.v v20, (a0), v16, v20, 
v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv16i32.nxv16i16( + *, + , + , + i32); + +define @intrinsic_vamoor_v_nxv16i32_nxv16i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoorei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv16i32.nxv16i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16( + *, + , + , + , + i32); + +define @intrinsic_vamoor_mask_v_nxv16i32_nxv16i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoorei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv1i32.nxv1i8( + *, + , + , + i32); + +define @intrinsic_vamoor_v_nxv1i32_nxv1i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoorei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv1i32.nxv1i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8( + *, + , + , + , + i32); + +define @intrinsic_vamoor_mask_v_nxv1i32_nxv1i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoorei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv2i32.nxv2i8( + *, + , + , + i32); + +define @intrinsic_vamoor_v_nxv2i32_nxv2i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoorei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv2i32.nxv2i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8( + *, + , + , + , + i32); + +define @intrinsic_vamoor_mask_v_nxv2i32_nxv2i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoorei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv4i32.nxv4i8( + *, + , + , + i32); + +define @intrinsic_vamoor_v_nxv4i32_nxv4i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: 
# %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoorei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv4i32.nxv4i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8( + *, + , + , + , + i32); + +define @intrinsic_vamoor_mask_v_nxv4i32_nxv4i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoorei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv8i32.nxv8i8( + *, + , + , + i32); + +define @intrinsic_vamoor_v_nxv8i32_nxv8i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoorei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv8i32.nxv8i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8( + *, + , + , + , + i32); + +define @intrinsic_vamoor_mask_v_nxv8i32_nxv8i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoorei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv16i32.nxv16i8( + *, + , + , + i32); + +define @intrinsic_vamoor_v_nxv16i32_nxv16i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoorei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv16i32.nxv16i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8( + *, + , + , + , + i32); + +define @intrinsic_vamoor_mask_v_nxv16i32_nxv16i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoorei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoor-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vamoor-rv64.ll @@ -0,0 +1,1714 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vamoor.nxv1i32.nxv1i64( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv1i32_nxv1i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vamoor_v_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoorei64.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv1i32.nxv1i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv1i32_nxv1i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoorei64.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv2i32.nxv2i64( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv2i32_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoorei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv2i32.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv2i32_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoorei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv4i32.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv4i32_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoorei64.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv4i32.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv4i32_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoorei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv8i32.nxv8i64( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv8i32_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamoorei64.v v28, (a0), v16, v28 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv8i32.nxv8i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv8i32_nxv8i64( *%0, %1, %2, 
%3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamoorei64.v v28, (a0), v16, v28, v0.t +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv1i64.nxv1i64( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv1i64_nxv1i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoorei64.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv1i64.nxv1i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv1i64_nxv1i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoorei64.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv2i64.nxv2i64( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv2i64_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoorei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv2i64.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv2i64_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoorei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv4i64.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv4i64_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoorei64.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv4i64.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv4i64_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoorei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv8i64.nxv8i64( + *, + , + , + i64); + +define 
@intrinsic_vamoor_v_nxv8i64_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoorei64.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv8i64.nxv8i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv8i64_nxv8i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoorei64.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv1i32.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv1i32_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoorei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv1i32.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv1i32_nxv1i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoorei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv2i32.nxv2i32( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv2i32_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoorei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv2i32.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv2i32_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoorei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv4i32.nxv4i32( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv4i32_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoorei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv4i32.nxv4i32( + *%0, + %1, + %2, + i64 %3) 
+ + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv4i32_nxv4i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoorei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv8i32.nxv8i32( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv8i32_nxv8i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoorei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv8i32.nxv8i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv8i32_nxv8i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoorei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv16i32.nxv16i32( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv16i32_nxv16i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoorei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv16i32.nxv16i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv16i32_nxv16i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoorei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv1i64.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv1i64_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoorei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv1i64.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv1i64_nxv1i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoorei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: 
vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv2i64.nxv2i32( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv2i64_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoorei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv2i64.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv2i64_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoorei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv4i64.nxv4i32( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv4i64_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoorei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv4i64.nxv4i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv4i64_nxv4i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoorei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv8i64.nxv8i32( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv8i64_nxv8i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoorei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv8i64.nxv8i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv8i64_nxv8i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoorei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv1i32.nxv1i16( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv1i32_nxv1i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoorei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv1i32.nxv1i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv1i32_nxv1i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoorei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv2i32.nxv2i16( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv2i32_nxv2i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoorei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv2i32.nxv2i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv2i32_nxv2i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoorei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv4i32.nxv4i16( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv4i32_nxv4i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoorei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv4i32.nxv4i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv4i32_nxv4i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoorei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv8i32.nxv8i16( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv8i32_nxv8i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoorei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv8i32.nxv8i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv8i32_nxv8i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e32,m4,tu,mu +; CHECK-NEXT: vamoorei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv16i32.nxv16i16( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv16i32_nxv16i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoorei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv16i32.nxv16i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv16i32_nxv16i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoorei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv16i32.nxv16i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv1i64.nxv1i16( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv1i64_nxv1i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoorei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv1i64.nxv1i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv1i64_nxv1i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoorei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv2i64.nxv2i16( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv2i64_nxv2i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoorei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv2i64.nxv2i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv2i64_nxv2i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoorei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv4i64.nxv4i16( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv4i64_nxv4i16( *%0, %1, %2, i64 %3) 
nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoorei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv4i64.nxv4i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv4i64_nxv4i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoorei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv8i64.nxv8i16( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv8i64_nxv8i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoorei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv8i64.nxv8i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv8i64_nxv8i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoorei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv1i32.nxv1i8( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv1i32_nxv1i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoorei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv1i32.nxv1i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv1i32_nxv1i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoorei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv1i32.nxv1i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv2i32.nxv2i8( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv2i32_nxv2i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoorei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv2i32.nxv2i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8( + *, + , 
+ , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv2i32_nxv2i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoorei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv2i32.nxv2i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv4i32.nxv4i8( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv4i32_nxv4i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoorei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv4i32.nxv4i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv4i32_nxv4i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoorei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv4i32.nxv4i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv8i32.nxv8i8( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv8i32_nxv8i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoorei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv8i32.nxv8i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv8i32_nxv8i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoorei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv8i32.nxv8i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv16i32.nxv16i8( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv16i32_nxv16i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoorei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv16i32.nxv16i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv16i32.nxv16i8( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv16i32_nxv16i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoorei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vamoor.mask.nxv16i32.nxv16i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv1i64.nxv1i8( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv1i64_nxv1i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoorei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv1i64.nxv1i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv1i64_nxv1i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoorei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv1i64.nxv1i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv2i64.nxv2i8( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv2i64_nxv2i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoorei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv2i64.nxv2i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv2i64_nxv2i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoorei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv2i64.nxv2i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv4i64.nxv4i8( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv4i64_nxv4i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoorei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.nxv4i64.nxv4i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv4i64_nxv4i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoorei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv4i64.nxv4i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoor.nxv8i64.nxv8i8( + *, + , + , + i64); + +define @intrinsic_vamoor_v_nxv8i64_nxv8i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoor_v_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoorei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vamoor.nxv8i64.nxv8i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8( + *, + , + , + , + i64); + +define @intrinsic_vamoor_mask_v_nxv8i64_nxv8i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoor_mask_v_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoorei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoor.mask.nxv8i64.nxv8i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv32.ll @@ -0,0 +1,2054 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vamoswap.nxv1i32.nxv1i32( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv1i32_nxv1i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1i32.nxv1i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2i32.nxv2i32( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv2i32_nxv2i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2i32.nxv2i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4i32.nxv4i32( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv4i32_nxv4i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + 
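+  ; Editorial sketch, not part of the autogenerated checks: the call below
+  ; presumably takes the base address (%0), an index vector (%1), the value
+  ; to swap (%2), and the vector length (%3), matching the argument lists of
+  ; the surrounding vamoswap tests. The scalable-vector types were lost in
+  ; this rendering of the patch; from the intrinsic name they are assumed to
+  ; be <vscale x 4 x i32> for the result, index, and value operands, with a
+  ; <vscale x 4 x i32>* base pointer and an i32 vl on rv32.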
%a = call @llvm.riscv.vamoswap.nxv4i32.nxv4i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8i32.nxv8i32( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv8i32_nxv8i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8i32.nxv8i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv16i32.nxv16i32( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv16i32_nxv16i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv16i32.nxv16i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1f32.nxv1i32( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv1f32_nxv1i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1f32.nxv1i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vamoswap_mask_v_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2f32.nxv2i32( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv2f32_nxv2i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2f32.nxv2i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4f32.nxv4i32( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv4f32_nxv4i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4f32.nxv4i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8f32.nxv8i32( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv8f32_nxv8i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8f32.nxv8i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv16f32.nxv16i32( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv16f32_nxv16i32( *%0, %1, 
%2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv16f32.nxv16i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1f64.nxv1i32( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv1f64_nxv1i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1f64.nxv1i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2f64.nxv2i32( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv2f64_nxv2i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2f64.nxv2i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4f64.nxv4i32( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv4f64_nxv4i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4f64.nxv4i32( 
+ *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8f64.nxv8i32( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv8f64_nxv8i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8f64.nxv8i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1i32.nxv1i16( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv1i32_nxv1i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1i32.nxv1i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2i32.nxv2i16( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv2i32_nxv2i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2i32.nxv2i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e32,m1,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4i32.nxv4i16( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv4i32_nxv4i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4i32.nxv4i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8i32.nxv8i16( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv8i32_nxv8i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8i32.nxv8i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv16i32.nxv16i16( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv16i32_nxv16i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv16i32.nxv16i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1f32.nxv1i16( + *, + , + , + i32); + 
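+; Editorial sketch, not part of the autogenerated test: the declaration above
+; with its scalable-vector types written out, inferred from the intrinsic name
+; (an nxv1f32 result and an nxv1i16 index vector) and from the argument
+; pattern used throughout this file. Kept as a comment so the test body is
+; unchanged; the actual declaration in the patch is assumed to be:
+;
+;   declare <vscale x 1 x float> @llvm.riscv.vamoswap.nxv1f32.nxv1i16(
+;     <vscale x 1 x float>*,
+;     <vscale x 1 x i16>,
+;     <vscale x 1 x float>,
+;     i32);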
+define @intrinsic_vamoswap_v_nxv1f32_nxv1i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1f32.nxv1i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2f32.nxv2i16( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv2f32_nxv2i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2f32.nxv2i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4f32.nxv4i16( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv4f32_nxv4i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4f32.nxv4i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8f32.nxv8i16( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv8f32_nxv8i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8f32.nxv8i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16( + *, + 
, + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv16f32.nxv16i16( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv16f32_nxv16i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv16f32.nxv16i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1f64.nxv1i16( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv1f64_nxv1i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1f64.nxv1i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2f64.nxv2i16( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv2f64_nxv2i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2f64.nxv2i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: 
vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4f64.nxv4i16( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv4f64_nxv4i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4f64.nxv4i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8f64.nxv8i16( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv8f64_nxv8i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8f64.nxv8i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1i32.nxv1i8( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv1i32_nxv1i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1i32.nxv1i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2i32.nxv2i8( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv2i32_nxv2i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vamoswap_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2i32.nxv2i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4i32.nxv4i8( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv4i32_nxv4i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4i32.nxv4i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8i32.nxv8i8( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv8i32_nxv8i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8i32.nxv8i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv16i32.nxv16i8( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv16i32_nxv16i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv16i32.nxv16i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8( + *, + , + , + , + i32); + +define 
@intrinsic_vamoswap_mask_v_nxv16i32_nxv16i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1f32.nxv1i8( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv1f32_nxv1i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1f32.nxv1i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2f32.nxv2i8( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv2f32_nxv2i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2f32.nxv2i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4f32.nxv4i8( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv4f32_nxv4i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4f32.nxv4i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8( + *%0, + %1, + %2, + %3, + i32 %4) + + 
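+  ; Editorial note, not autogenerated: in the CHECK lines above, the trailing
+  ; v0.t is the mask operand, the i32 argument (%4) is the vector length fed
+  ; to vsetvli, and %a presumably receives the prior memory contents returned
+  ; by the masked vamoswap before being returned below.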
ret %a +} + +declare @llvm.riscv.vamoswap.nxv8f32.nxv8i8( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv8f32_nxv8i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8f32.nxv8i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv16f32.nxv16i8( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv16f32_nxv16i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv16f32.nxv16i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1f64.nxv1i8( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv1f64_nxv1i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1f64.nxv1i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2f64.nxv2i8( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv2f64_nxv2i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei8.v v18, (a0), v16, v18 +; 
CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2f64.nxv2i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4f64.nxv4i8( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv4f64_nxv4i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4f64.nxv4i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8f64.nxv8i8( + *, + , + , + i32); + +define @intrinsic_vamoswap_v_nxv8f64_nxv8i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8f64.nxv8i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8( + *, + , + , + , + i32); + +define @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vamoswap-rv64.ll @@ -0,0 +1,3426 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vamoswap.nxv1i32.nxv1i64( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv1i32_nxv1i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli 
a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei64.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1i32.nxv1i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei64.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2i32.nxv2i64( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv2i32_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2i32.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4i32.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv4i32_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei64.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4i32.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8i32.nxv8i64( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv8i32_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei64.v v28, (a0), v16, v28 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8i32.nxv8i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i64( *%0, %1, %2, %3, i64 %4) 
nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei64.v v28, (a0), v16, v28, v0.t +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1i64.nxv1i64( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv1i64_nxv1i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei64.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1i64.nxv1i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei64.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2i64.nxv2i64( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv2i64_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2i64.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4i64.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv4i64_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei64.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4i64.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare 
@llvm.riscv.vamoswap.nxv8i64.nxv8i64( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv8i64_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei64.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8i64.nxv8i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei64.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1f32.nxv1i64( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv1f32_nxv1i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei64.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1f32.nxv1i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei64.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2f32.nxv2i64( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv2f32_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2f32.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4f32.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv4f32_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei64.v v20, (a0), v16, v20 +; CHECK-NEXT: 
vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4f32.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8f32.nxv8i64( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv8f32_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei64.v v28, (a0), v16, v28 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8f32.nxv8i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei64.v v28, (a0), v16, v28, v0.t +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1f64.nxv1i64( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv1f64_nxv1i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei64.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1f64.nxv1i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei64.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2f64.nxv2i64( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv2f64_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2f64.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; 
CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4f64.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv4f64_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei64.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4f64.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8f64.nxv8i64( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv8f64_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei64.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8f64.nxv8i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei64.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1i32.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv1i32_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1i32.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i32( + *%0, + %1, + %2, 
+ %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2i32.nxv2i32( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv2i32_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2i32.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4i32.nxv4i32( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv4i32_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4i32.nxv4i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8i32.nxv8i32( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv8i32_nxv8i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8i32.nxv8i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv16i32.nxv16i32( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv16i32_nxv16i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv16i32.nxv16i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1i64.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv1i64_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1i64.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2i64.nxv2i32( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv2i64_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2i64.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4i64.nxv4i32( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv4i64_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4i64.nxv4i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8i64.nxv8i32( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv8i64_nxv8i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8i64.nxv8i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1f32.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv1f32_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1f32.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2f32.nxv2i32( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv2f32_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2f32.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4f32.nxv4i32( + *, + , 
+ , + i64); + +define @intrinsic_vamoswap_v_nxv4f32_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4f32.nxv4i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8f32.nxv8i32( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv8f32_nxv8i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8f32.nxv8i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv16f32.nxv16i32( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv16f32_nxv16i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv16f32.nxv16i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1f64.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv1f64_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: 
jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1f64.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2f64.nxv2i32( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv2f64_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2f64.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4f64.nxv4i32( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv4f64_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4f64.nxv4i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8f64.nxv8i32( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv8f64_nxv8i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8f64.nxv8i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, 
e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1i32.nxv1i16( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv1i32_nxv1i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1i32.nxv1i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2i32.nxv2i16( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv2i32_nxv2i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2i32.nxv2i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4i32.nxv4i16( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv4i32_nxv4i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4i32.nxv4i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8i32.nxv8i16( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv8i32_nxv8i16( *%0, %1, %2, i64 %3) nounwind { +; 
CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8i32.nxv8i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv16i32.nxv16i16( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv16i32_nxv16i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv16i32.nxv16i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1i64.nxv1i16( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv1i64_nxv1i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1i64.nxv1i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2i64.nxv2i16( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv2i64_nxv2i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2i64.nxv2i16( + *%0, + %1, + %2, + 
i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4i64.nxv4i16( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv4i64_nxv4i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4i64.nxv4i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8i64.nxv8i16( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv8i64_nxv8i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8i64.nxv8i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1f32.nxv1i16( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv1f32_nxv1i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1f32.nxv1i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; 
CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2f32.nxv2i16( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv2f32_nxv2i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2f32.nxv2i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4f32.nxv4i16( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv4f32_nxv4i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4f32.nxv4i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8f32.nxv8i16( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv8f32_nxv8i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8f32.nxv8i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv16f32.nxv16i16( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv16f32_nxv16i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv16f32.nxv16i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1f64.nxv1i16( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv1f64_nxv1i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1f64.nxv1i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2f64.nxv2i16( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv2f64_nxv2i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2f64.nxv2i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4f64.nxv4i16( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv4f64_nxv4i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4f64.nxv4i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16( + *, + , + , + , + i64); 
+ +define @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8f64.nxv8i16( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv8f64_nxv8i16( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8f64.nxv8i16( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i16( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i16( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1i32.nxv1i8( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv1i32_nxv1i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1i32.nxv1i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv1i32_nxv1i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1i32.nxv1i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2i32.nxv2i8( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv2i32_nxv2i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2i32.nxv2i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv2i32_nxv2i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: 
+ %a = call @llvm.riscv.vamoswap.mask.nxv2i32.nxv2i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4i32.nxv4i8( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv4i32_nxv4i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4i32.nxv4i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv4i32_nxv4i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4i32.nxv4i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8i32.nxv8i8( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv8i32_nxv8i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8i32.nxv8i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv8i32_nxv8i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8i32.nxv8i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv16i32.nxv16i8( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv16i32_nxv16i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv16i32.nxv16i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv16i32_nxv16i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv16i32.nxv16i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1i64.nxv1i8( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv1i64_nxv1i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1i64.nxv1i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv1i64_nxv1i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1i64.nxv1i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2i64.nxv2i8( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv2i64_nxv2i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2i64.nxv2i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv2i64_nxv2i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2i64.nxv2i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4i64.nxv4i8( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv4i64_nxv4i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4i64.nxv4i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv4i64_nxv4i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4i64.nxv4i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8i64.nxv8i8( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv8i64_nxv8i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8i64.nxv8i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv8i64_nxv8i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vamoswap_mask_v_nxv8i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8i64.nxv8i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1f32.nxv1i8( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv1f32_nxv1i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1f32.nxv1i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv1f32_nxv1i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1f32.nxv1i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2f32.nxv2i8( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv2f32_nxv2i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv2f32.nxv2i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv2f32_nxv2i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2f32.nxv2i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4f32.nxv4i8( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv4f32_nxv4i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4f32.nxv4i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv4f32_nxv4i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoswapei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4f32.nxv4i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8f32.nxv8i8( + *, + , + , + i64); + +define 
@intrinsic_vamoswap_v_nxv8f32_nxv8i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8f32.nxv8i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv8f32_nxv8i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoswapei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8f32.nxv8i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv16f32.nxv16i8( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv16f32_nxv16i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv16f32.nxv16i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv16f32_nxv16i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv16f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoswapei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv16f32.nxv16i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv1f64.nxv1i8( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv1f64_nxv1i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv1f64.nxv1i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv1f64_nxv1i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoswapei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv1f64.nxv1i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv2f64.nxv2i8( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv2f64_nxv2i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vamoswap.nxv2f64.nxv2i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv2f64_nxv2i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoswapei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv2f64.nxv2i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv4f64.nxv4i8( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv4f64_nxv4i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv4f64.nxv4i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv4f64_nxv4i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoswapei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv4f64.nxv4i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoswap.nxv8f64.nxv8i8( + *, + , + , + i64); + +define @intrinsic_vamoswap_v_nxv8f64_nxv8i8( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_v_nxv8f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.nxv8f64.nxv8i8( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8( + *, + , + , + , + i64); + +define @intrinsic_vamoswap_mask_v_nxv8f64_nxv8i8( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoswap_mask_v_nxv8f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoswapei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoswap.mask.nxv8f64.nxv8i8( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv32.ll @@ -0,0 +1,734 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vamoxor.nxv1i32.nxv1i32( + *, + , + , + i32); + +define @intrinsic_vamoxor_v_nxv1i32_nxv1i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoxorei32.v v17, (a0), v16, v17 +; CHECK-NEXT: 
vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv1i32.nxv1i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32( + *, + , + , + , + i32); + +define @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoxorei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv2i32.nxv2i32( + *, + , + , + i32); + +define @intrinsic_vamoxor_v_nxv2i32_nxv2i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoxorei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv2i32.nxv2i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32( + *, + , + , + , + i32); + +define @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoxorei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv4i32.nxv4i32( + *, + , + , + i32); + +define @intrinsic_vamoxor_v_nxv4i32_nxv4i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoxorei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv4i32.nxv4i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32( + *, + , + , + , + i32); + +define @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoxorei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv8i32.nxv8i32( + *, + , + , + i32); + +define @intrinsic_vamoxor_v_nxv8i32_nxv8i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoxorei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv8i32.nxv8i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32( + *, + , + , + , + i32); + +define @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoxorei32.v v20, (a0), v16, v20, v0.t +; 
CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv16i32.nxv16i32( + *, + , + , + i32); + +define @intrinsic_vamoxor_v_nxv16i32_nxv16i32( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoxorei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv16i32.nxv16i32( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32( + *, + , + , + , + i32); + +define @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i32( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoxorei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv1i32.nxv1i16( + *, + , + , + i32); + +define @intrinsic_vamoxor_v_nxv1i32_nxv1i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoxorei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv1i32.nxv1i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16( + *, + , + , + , + i32); + +define @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoxorei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv2i32.nxv2i16( + *, + , + , + i32); + +define @intrinsic_vamoxor_v_nxv2i32_nxv2i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoxorei16.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv2i32.nxv2i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16( + *, + , + , + , + i32); + +define @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoxorei16.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv4i32.nxv4i16( + *, + , + , + i32); + +define @intrinsic_vamoxor_v_nxv4i32_nxv4i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: 
intrinsic_vamoxor_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoxorei16.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv4i32.nxv4i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16( + *, + , + , + , + i32); + +define @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoxorei16.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv8i32.nxv8i16( + *, + , + , + i32); + +define @intrinsic_vamoxor_v_nxv8i32_nxv8i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoxorei16.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv8i32.nxv8i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16( + *, + , + , + , + i32); + +define @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoxorei16.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv16i32.nxv16i16( + *, + , + , + i32); + +define @intrinsic_vamoxor_v_nxv16i32_nxv16i16( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoxorei16.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv16i32.nxv16i16( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16( + *, + , + , + , + i32); + +define @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i16( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv16i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoxorei16.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv1i32.nxv1i8( + *, + , + , + i32); + +define @intrinsic_vamoxor_v_nxv1i32_nxv1i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoxorei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv1i32.nxv1i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare 
@llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8( + *, + , + , + , + i32); + +define @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoxorei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv2i32.nxv2i8( + *, + , + , + i32); + +define @intrinsic_vamoxor_v_nxv2i32_nxv2i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoxorei8.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv2i32.nxv2i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8( + *, + , + , + , + i32); + +define @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoxorei8.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv4i32.nxv4i8( + *, + , + , + i32); + +define @intrinsic_vamoxor_v_nxv4i32_nxv4i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoxorei8.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv4i32.nxv4i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8( + *, + , + , + , + i32); + +define @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoxorei8.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv8i32.nxv8i8( + *, + , + , + i32); + +define @intrinsic_vamoxor_v_nxv8i32_nxv8i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoxorei8.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv8i32.nxv8i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8( + *, + , + , + , + i32); + +define @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoxorei8.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare 
@llvm.riscv.vamoxor.nxv16i32.nxv16i8( + *, + , + , + i32); + +define @intrinsic_vamoxor_v_nxv16i32_nxv16i8( *%0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoxorei8.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv16i32.nxv16i8( + *%0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8( + *, + , + , + , + i32); + +define @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i8( *%0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv16i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoxorei8.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8( + *%0, + %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vamoxor-rv64.ll @@ -0,0 +1,1714 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zvamo -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vamoxor.nxv1i32.nxv1i64( + *, + , + , + i64); + +define @intrinsic_vamoxor_v_nxv1i32_nxv1i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoxorei64.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv1i32.nxv1i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64( + *, + , + , + , + i64); + +define @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoxorei64.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv2i32.nxv2i64( + *, + , + , + i64); + +define @intrinsic_vamoxor_v_nxv2i32_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoxorei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv2i32.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoxorei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv1r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vamoxor.mask.nxv2i32.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv4i32.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamoxor_v_nxv4i32_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoxorei64.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv4i32.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoxorei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv2r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv8i32.nxv8i64( + *, + , + , + i64); + +define @intrinsic_vamoxor_v_nxv8i32_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamoxorei64.v v28, (a0), v16, v28 +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv8i32.nxv8i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu +; CHECK-NEXT: vle32.v v28, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu +; CHECK-NEXT: vamoxorei64.v v28, (a0), v16, v28, v0.t +; CHECK-NEXT: vmv4r.v v16, v28 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv1i64.nxv1i64( + *, + , + , + i64); + +define @intrinsic_vamoxor_v_nxv1i64_nxv1i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoxorei64.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv1i64.nxv1i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64( + *, + , + , + , + i64); + +define @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoxorei64.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv2i64.nxv2i64( + *, + , + , + i64); + +define @intrinsic_vamoxor_v_nxv2i64_nxv2i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, 
e64,m2,tu,mu +; CHECK-NEXT: vamoxorei64.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv2i64.nxv2i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64( + *, + , + , + , + i64); + +define @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoxorei64.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv4i64.nxv4i64( + *, + , + , + i64); + +define @intrinsic_vamoxor_v_nxv4i64_nxv4i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoxorei64.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv4i64.nxv4i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64( + *, + , + , + , + i64); + +define @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoxorei64.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv8i64.nxv8i64( + *, + , + , + i64); + +define @intrinsic_vamoxor_v_nxv8i64_nxv8i64( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoxorei64.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv8i64.nxv8i64( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64( + *, + , + , + , + i64); + +define @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i64( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu +; CHECK-NEXT: vamoxorei64.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i64( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv1i32.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamoxor_v_nxv1i32_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoxorei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv1i32.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i32( *%0, 
%1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu +; CHECK-NEXT: vamoxorei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv2i32.nxv2i32( + *, + , + , + i64); + +define @intrinsic_vamoxor_v_nxv2i32_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoxorei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv2i32.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32( + *, + , + , + , + i64); + +define @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu +; CHECK-NEXT: vamoxorei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv4i32.nxv4i32( + *, + , + , + i64); + +define @intrinsic_vamoxor_v_nxv4i32_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoxorei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv4i32.nxv4i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32( + *, + , + , + , + i64); + +define @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu +; CHECK-NEXT: vamoxorei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv8i32.nxv8i32( + *, + , + , + i64); + +define @intrinsic_vamoxor_v_nxv8i32_nxv8i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoxorei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv8i32.nxv8i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32( + *, + , + , + , + i64); + +define @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu +; CHECK-NEXT: vamoxorei32.v v20, (a0), v16, v20, v0.t +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv16i32.nxv16i32( + *, + , + , + i64); + +define @intrinsic_vamoxor_v_nxv16i32_nxv16i32( 
*%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoxorei32.v v8, (a0), v16, v8 +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv16i32.nxv16i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32( + *, + , + , + , + i64); + +define @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv16i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu +; CHECK-NEXT: vamoxorei32.v v8, (a0), v16, v8, v0.t +; CHECK-NEXT: vmv8r.v v16, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv1i64.nxv1i32( + *, + , + , + i64); + +define @intrinsic_vamoxor_v_nxv1i64_nxv1i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoxorei32.v v17, (a0), v16, v17 +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv1i64.nxv1i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32( + *, + , + , + , + i64); + +define @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu +; CHECK-NEXT: vamoxorei32.v v17, (a0), v16, v17, v0.t +; CHECK-NEXT: vmv1r.v v16, v17 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv2i64.nxv2i32( + *, + , + , + i64); + +define @intrinsic_vamoxor_v_nxv2i64_nxv2i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoxorei32.v v18, (a0), v16, v18 +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv2i64.nxv2i32( + *%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32( + *, + , + , + , + i64); + +define @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i32( *%0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu +; CHECK-NEXT: vamoxorei32.v v18, (a0), v16, v18, v0.t +; CHECK-NEXT: vmv2r.v v16, v18 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i32( + *%0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vamoxor.nxv4i64.nxv4i32( + *, + , + , + i64); + +define @intrinsic_vamoxor_v_nxv4i64_nxv4i32( *%0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu +; CHECK-NEXT: vamoxorei32.v v20, (a0), v16, v20 +; CHECK-NEXT: vmv4r.v v16, v20 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vamoxor.nxv4i64.nxv4i32( + *%0, + %1, + %2, + 
i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32(
+  <vscale x 4 x i64>*,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i32(<vscale x 4 x i64> *%0, <vscale x 4 x i32> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vamoxorei32.v v20, (a0), v16, v20, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i32(
+    <vscale x 4 x i64> *%0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32(
+  <vscale x 8 x i64>*,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i64>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT:    vamoxorei32.v v8, (a0), v16, v8
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i32(
+    <vscale x 8 x i64> *%0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i64> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32(
+  <vscale x 8 x i64>*,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i32(<vscale x 8 x i64> *%0, <vscale x 8 x i32> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT:    vamoxorei32.v v8, (a0), v16, v8, v0.t
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i32(
+    <vscale x 8 x i64> *%0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i32>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vamoxor_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vamoxorei16.v v17, (a0), v16, v17
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> *%0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i32> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16(
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i16(<vscale x 1 x i32> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vamoxorei16.v v17, (a0), v16, v17, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> *%0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16(
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i32>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vamoxor_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vamoxorei16.v v17, (a0), v16, v17
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i16(
+    <vscale x 2 x i32> *%0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i32> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16(
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i16(<vscale x 2 x i32> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vamoxorei16.v v17, (a0), v16, v17, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i16(
+    <vscale x 2 x i32> *%0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16(
+  <vscale x 4 x i32>*,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i32>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vamoxor_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vamoxorei16.v v18, (a0), v16, v18
+; CHECK-NEXT:    vmv2r.v v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> *%0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i32> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16(
+  <vscale x 4 x i32>*,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i16(<vscale x 4 x i32> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vamoxorei16.v v18, (a0), v16, v18, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> *%0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16(
+  <vscale x 8 x i32>*,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i32>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vamoxor_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vamoxorei16.v v20, (a0), v16, v20
+; CHECK-NEXT:    vmv4r.v v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i16(
+    <vscale x 8 x i32> *%0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i32> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16(
+  <vscale x 8 x i32>*,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i16(<vscale x 8 x i32> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vamoxorei16.v v20, (a0), v16, v20, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i16(
+    <vscale x 8 x i32> *%0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16(
+  <vscale x 16 x i32>*,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i32>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vamoxor_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vamoxorei16.v v8, (a0), v16, v8
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> *%0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i32> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16(
+  <vscale x 16 x i32>*,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i16(<vscale x 16 x i32> *%0, <vscale x 16 x i16> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vamoxorei16.v v8, (a0), v16, v8, v0.t
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> *%0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16(
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i64>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i64_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vamoxorei16.v v17, (a0), v16, v17
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i16(
+    <vscale x 1 x i64> *%0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i64> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16(
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i16(<vscale x 1 x i64> *%0, <vscale x 1 x i16> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i64_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vamoxorei16.v v17, (a0), v16, v17, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i16(
+    <vscale x 1 x i64> *%0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16(
+  <vscale x 2 x i64>*,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i64>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i64_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vamoxorei16.v v18, (a0), v16, v18
+; CHECK-NEXT:    vmv2r.v v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i16(
+    <vscale x 2 x i64> *%0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i64> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16(
+  <vscale x 2 x i64>*,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i16(<vscale x 2 x i64> *%0, <vscale x 2 x i16> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i64_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vamoxorei16.v v18, (a0), v16, v18, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i16(
+    <vscale x 2 x i64> *%0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16(
+  <vscale x 4 x i64>*,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i64>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vamoxorei16.v v20, (a0), v16, v20
+; CHECK-NEXT:    vmv4r.v v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i16(
+    <vscale x 4 x i64> *%0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i64> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16(
+  <vscale x 4 x i64>*,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i16(<vscale x 4 x i64> *%0, <vscale x 4 x i16> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i64_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vamoxorei16.v v20, (a0), v16, v20, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i16(
+    <vscale x 4 x i64> *%0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16(
+  <vscale x 8 x i64>*,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i64>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i64_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT:    vamoxorei16.v v8, (a0), v16, v8
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i16(
+    <vscale x 8 x i64> *%0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i64> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16(
+  <vscale x 8 x i64>*,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i16(<vscale x 8 x i64> *%0, <vscale x 8 x i16> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i64_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT:    vamoxorei16.v v8, (a0), v16, v8, v0.t
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i16(
+    <vscale x 8 x i64> *%0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8(
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i32>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vamoxor_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i32_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vamoxorei8.v v17, (a0), v16, v17
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.nxv1i32.nxv1i8(
+    <vscale x 1 x i32> *%0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i32> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8(
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vamoxor_mask_v_nxv1i32_nxv1i8(<vscale x 1 x i32> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i32_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vamoxorei8.v v17, (a0), v16, v17, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vamoxor.mask.nxv1i32.nxv1i8(
+    <vscale x 1 x i32> *%0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8(
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i32>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vamoxor_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i32_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vamoxorei8.v v17, (a0), v16, v17
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.nxv2i32.nxv2i8(
+    <vscale x 2 x i32> *%0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i32> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8(
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vamoxor_mask_v_nxv2i32_nxv2i8(<vscale x 2 x i32> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i32_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vamoxorei8.v v17, (a0), v16, v17, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vamoxor.mask.nxv2i32.nxv2i8(
+    <vscale x 2 x i32> *%0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8(
+  <vscale x 4 x i32>*,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i32>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vamoxor_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i32_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vamoxorei8.v v18, (a0), v16, v18
+; CHECK-NEXT:    vmv2r.v v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.nxv4i32.nxv4i8(
+    <vscale x 4 x i32> *%0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i32> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8(
+  <vscale x 4 x i32>*,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vamoxor_mask_v_nxv4i32_nxv4i8(<vscale x 4 x i32> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i32_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vamoxorei8.v v18, (a0), v16, v18, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vamoxor.mask.nxv4i32.nxv4i8(
+    <vscale x 4 x i32> *%0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8(
+  <vscale x 8 x i32>*,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i32>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vamoxor_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vamoxorei8.v v20, (a0), v16, v20
+; CHECK-NEXT:    vmv4r.v v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.nxv8i32.nxv8i8(
+    <vscale x 8 x i32> *%0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i32> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8(
+  <vscale x 8 x i32>*,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vamoxor_mask_v_nxv8i32_nxv8i8(<vscale x 8 x i32> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i32_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vamoxorei8.v v20, (a0), v16, v20, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vamoxor.mask.nxv8i32.nxv8i8(
+    <vscale x 8 x i32> *%0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8(
+  <vscale x 16 x i32>*,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i32>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vamoxor_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv16i32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vamoxorei8.v v8, (a0), v16, v8
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.nxv16i32.nxv16i8(
+    <vscale x 16 x i32> *%0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i32> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8(
+  <vscale x 16 x i32>*,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vamoxor_mask_v_nxv16i32_nxv16i8(<vscale x 16 x i32> *%0, <vscale x 16 x i8> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv16i32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT:    vamoxorei8.v v8, (a0), v16, v8, v0.t
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vamoxor.mask.nxv16i32.nxv16i8(
+    <vscale x 16 x i32> *%0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i32> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8(
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i64>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vamoxor_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv1i64_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vamoxorei8.v v17, (a0), v16, v17
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.nxv1i64.nxv1i8(
+    <vscale x 1 x i64> *%0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i64> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8(
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vamoxor_mask_v_nxv1i64_nxv1i8(<vscale x 1 x i64> *%0, <vscale x 1 x i8> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv1i64_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT:    vamoxorei8.v v17, (a0), v16, v17, v0.t
+; CHECK-NEXT:    vmv1r.v v16, v17
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vamoxor.mask.nxv1i64.nxv1i8(
+    <vscale x 1 x i64> *%0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8(
+  <vscale x 2 x i64>*,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i64>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vamoxor_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv2i64_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vamoxorei8.v v18, (a0), v16, v18
+; CHECK-NEXT:    vmv2r.v v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.nxv2i64.nxv2i8(
+    <vscale x 2 x i64> *%0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i64> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8(
+  <vscale x 2 x i64>*,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vamoxor_mask_v_nxv2i64_nxv2i8(<vscale x 2 x i64> *%0, <vscale x 2 x i8> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv2i64_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vamoxorei8.v v18, (a0), v16, v18, v0.t
+; CHECK-NEXT:    vmv2r.v v16, v18
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vamoxor.mask.nxv2i64.nxv2i8(
+    <vscale x 2 x i64> *%0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8(
+  <vscale x 4 x i64>*,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i64>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vamoxor_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv4i64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vamoxorei8.v v20, (a0), v16, v20
+; CHECK-NEXT:    vmv4r.v v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.nxv4i64.nxv4i8(
+    <vscale x 4 x i64> *%0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i64> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8(
+  <vscale x 4 x i64>*,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vamoxor_mask_v_nxv4i64_nxv4i8(<vscale x 4 x i64> *%0, <vscale x 4 x i8> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv4i64_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vamoxorei8.v v20, (a0), v16, v20, v0.t
+; CHECK-NEXT:    vmv4r.v v16, v20
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vamoxor.mask.nxv4i64.nxv4i8(
+    <vscale x 4 x i64> *%0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8(
+  <vscale x 8 x i64>*,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i64>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vamoxor_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_v_nxv8i64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT:    vamoxorei8.v v8, (a0), v16, v8
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.nxv8i64.nxv8i8(
+    <vscale x 8 x i64> *%0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i64> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8(
+  <vscale x 8 x i64>*,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vamoxor_mask_v_nxv8i64_nxv8i8(<vscale x 8 x i64> *%0, <vscale x 8 x i8> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vamoxor_mask_v_nxv8i64_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a1)
+; CHECK-NEXT:    vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT:    vamoxorei8.v v8, (a0), v16, v8, v0.t
+; CHECK-NEXT:    vmv8r.v v16, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vamoxor.mask.nxv8i64.nxv8i8(
+    <vscale x 8 x i64> *%0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}