diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1596,12 +1596,18 @@
 def int_aarch64_sve_tbl : AdvSIMD_SVE_TBL_Intrinsic;
 def int_aarch64_sve_trn1 : AdvSIMD_2VectorArg_Intrinsic;
 def int_aarch64_sve_trn2 : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_trn1q : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_trn2q : AdvSIMD_2VectorArg_Intrinsic;
 def int_aarch64_sve_uunpkhi : AdvSIMD_SVE_Unpack_Intrinsic;
 def int_aarch64_sve_uunpklo : AdvSIMD_SVE_Unpack_Intrinsic;
 def int_aarch64_sve_uzp1 : AdvSIMD_2VectorArg_Intrinsic;
 def int_aarch64_sve_uzp2 : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_uzp1q : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_uzp2q : AdvSIMD_2VectorArg_Intrinsic;
 def int_aarch64_sve_zip1 : AdvSIMD_2VectorArg_Intrinsic;
 def int_aarch64_sve_zip2 : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_zip1q : AdvSIMD_2VectorArg_Intrinsic;
+def int_aarch64_sve_zip2q : AdvSIMD_2VectorArg_Intrinsic;
 
 //
 // Logical operations
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -1964,12 +1964,12 @@
   defm LD1RO_H : sve_mem_ldor_ss<0b01, "ld1roh", Z_h, ZPR16, GPR64NoXZRshifted16>;
   defm LD1RO_W : sve_mem_ldor_ss<0b10, "ld1row", Z_s, ZPR32, GPR64NoXZRshifted32>;
   defm LD1RO_D : sve_mem_ldor_ss<0b11, "ld1rod", Z_d, ZPR64, GPR64NoXZRshifted64>;
-  def ZIP1_ZZZ_128 : sve_int_perm_bin_perm_128_zz<0b00, 0, "zip1">;
-  def ZIP2_ZZZ_128 : sve_int_perm_bin_perm_128_zz<0b00, 1, "zip2">;
-  def UZP1_ZZZ_128 : sve_int_perm_bin_perm_128_zz<0b01, 0, "uzp1">;
-  def UZP2_ZZZ_128 : sve_int_perm_bin_perm_128_zz<0b01, 1, "uzp2">;
-  def TRN1_ZZZ_128 : sve_int_perm_bin_perm_128_zz<0b11, 0, "trn1">;
-  def TRN2_ZZZ_128 : sve_int_perm_bin_perm_128_zz<0b11, 1, "trn2">;
+  defm ZIP1_ZZZ_128 : sve_int_perm_bin_perm_128_zz<0b00, 0, "zip1", int_aarch64_sve_zip1q>;
+  defm ZIP2_ZZZ_128 : sve_int_perm_bin_perm_128_zz<0b00, 1, "zip2", int_aarch64_sve_zip2q>;
+  defm UZP1_ZZZ_128 : sve_int_perm_bin_perm_128_zz<0b01, 0, "uzp1", int_aarch64_sve_uzp1q>;
+  defm UZP2_ZZZ_128 : sve_int_perm_bin_perm_128_zz<0b01, 1, "uzp2", int_aarch64_sve_uzp2q>;
+  defm TRN1_ZZZ_128 : sve_int_perm_bin_perm_128_zz<0b11, 0, "trn1", int_aarch64_sve_trn1q>;
+  defm TRN2_ZZZ_128 : sve_int_perm_bin_perm_128_zz<0b11, 1, "trn2", int_aarch64_sve_trn2q>;
 }
 
 let Predicates = [HasSVE2] in {
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -7727,6 +7727,18 @@
   let Inst{4-0} = Zd;
 }
 
+multiclass sve_int_perm_bin_perm_128_zz<bits<2> opc, bit P, string asm, SDPatternOperator op> {
+  def NAME : sve_int_perm_bin_perm_128_zz<opc, P, asm>;
+
+  def : SVE_2_Op_Pat<nxv16i8, op, nxv16i8, nxv16i8, !cast<Instruction>(NAME)>;
+  def : SVE_2_Op_Pat<nxv8i16, op, nxv8i16, nxv8i16, !cast<Instruction>(NAME)>;
+  def : SVE_2_Op_Pat<nxv8f16, op, nxv8f16, nxv8f16, !cast<Instruction>(NAME)>;
+  def : SVE_2_Op_Pat<nxv8bf16, op, nxv8bf16, nxv8bf16, !cast<Instruction>(NAME)>;
+  def : SVE_2_Op_Pat<nxv4i32, op, nxv4i32, nxv4i32, !cast<Instruction>(NAME)>;
+  def : SVE_2_Op_Pat<nxv4f32, op, nxv4f32, nxv4f32, !cast<Instruction>(NAME)>;
+  def : SVE_2_Op_Pat<nxv2i64, op, nxv2i64, nxv2i64, !cast<Instruction>(NAME)>;
+  def : SVE_2_Op_Pat<nxv2f64, op, nxv2f64, nxv2f64, !cast<Instruction>(NAME)>;
+}
 
 /// Addressing modes
 def am_sve_indexed_s4 : ComplexPattern<i64, 2, "SelectAddrModeIndexedSVE<-8, 7>", [], [SDNPWantRoot]>;
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-perm-select-matmul-fp64.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-perm-select-matmul-fp64.ll
new file
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-perm-select-matmul-fp64.ll
@@ -0,0 +1,512 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+sve,+f64mm -asm-verbose=0 < %s -o - | FileCheck %s
+
+;
+; TRN1Q
+;
+
+define <vscale x 16 x i8> @trn1_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) nounwind {
+; CHECK-LABEL: trn1_i8:
+; CHECK-NEXT: trn1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.trn1q.nxv16i8(<vscale x 16 x i8> %a,
+                                                                 <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @trn1_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) nounwind {
+; CHECK-LABEL: trn1_i16:
+; CHECK-NEXT: trn1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.trn1q.nxv8i16(<vscale x 8 x i16> %a,
+                                                                 <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @trn1_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) nounwind {
+; CHECK-LABEL: trn1_i32:
+; CHECK-NEXT: trn1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.trn1q.nxv4i32(<vscale x 4 x i32> %a,
+                                                                 <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @trn1_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) nounwind {
+; CHECK-LABEL: trn1_i64:
+; CHECK-NEXT: trn1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.trn1q.nxv2i64(<vscale x 2 x i64> %a,
+                                                                 <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 8 x half> @trn1_f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) nounwind {
+; CHECK-LABEL: trn1_f16:
+; CHECK-NEXT: trn1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.trn1q.nxv8f16(<vscale x 8 x half> %a,
+                                                                  <vscale x 8 x half> %b)
+  ret <vscale x 8 x half> %out
+}
+
+define <vscale x 8 x bfloat> @trn1_bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) nounwind {
+; CHECK-LABEL: trn1_bf16:
+; CHECK-NEXT: trn1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.trn1q.nxv8bf16(<vscale x 8 x bfloat> %a,
+                                                                     <vscale x 8 x bfloat> %b)
+  ret <vscale x 8 x bfloat> %out
+}
+
+define <vscale x 4 x float> @trn1_f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) nounwind {
+; CHECK-LABEL: trn1_f32:
+; CHECK-NEXT: trn1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.trn1q.nxv4f32(<vscale x 4 x float> %a,
+                                                                   <vscale x 4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @trn1_f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) nounwind {
+; CHECK-LABEL: trn1_f64:
+; CHECK-NEXT: trn1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.trn1q.nxv2f64(<vscale x 2 x double> %a,
+                                                                    <vscale x 2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
+;
+; TRN2Q
+;
+
+define <vscale x 16 x i8> @trn2_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) nounwind {
+; CHECK-LABEL: trn2_i8:
+; CHECK-NEXT: trn2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.trn2q.nxv16i8(<vscale x 16 x i8> %a,
+                                                                 <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @trn2_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) nounwind {
+; CHECK-LABEL: trn2_i16:
+; CHECK-NEXT: trn2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.trn2q.nxv8i16(<vscale x 8 x i16> %a,
+                                                                 <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @trn2_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) nounwind {
+; CHECK-LABEL: trn2_i32:
+; CHECK-NEXT: trn2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.trn2q.nxv4i32(<vscale x 4 x i32> %a,
+                                                                 <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @trn2_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) nounwind {
+; CHECK-LABEL: trn2_i64:
+; CHECK-NEXT: trn2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.trn2q.nxv2i64(<vscale x 2 x i64> %a,
+                                                                 <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 8 x half> @trn2_f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) nounwind {
+; CHECK-LABEL: trn2_f16:
+; CHECK-NEXT: trn2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.trn2q.nxv8f16(<vscale x 8 x half> %a,
+                                                                  <vscale x 8 x half> %b)
+  ret <vscale x 8 x half> %out
+}
+
+define <vscale x 8 x bfloat> @trn2_bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) nounwind {
+; CHECK-LABEL: trn2_bf16:
+; CHECK-NEXT: trn2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.trn2q.nxv8bf16(<vscale x 8 x bfloat> %a,
+                                                                     <vscale x 8 x bfloat> %b)
+  ret <vscale x 8 x bfloat> %out
+}
+
+define <vscale x 4 x float> @trn2_f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) nounwind {
+; CHECK-LABEL: trn2_f32:
+; CHECK-NEXT: trn2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.trn2q.nxv4f32(<vscale x 4 x float> %a,
+                                                                   <vscale x 4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @trn2_f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) nounwind {
+; CHECK-LABEL: trn2_f64:
+; CHECK-NEXT: trn2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.trn2q.nxv2f64(<vscale x 2 x double> %a,
+                                                                    <vscale x 2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
+;
+; UZP1Q
+;
+
+define <vscale x 16 x i8> @uzp1_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) nounwind {
+; CHECK-LABEL: uzp1_i8:
+; CHECK-NEXT: uzp1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uzp1q.nxv16i8(<vscale x 16 x i8> %a,
+                                                                 <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @uzp1_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) nounwind {
+; CHECK-LABEL: uzp1_i16:
+; CHECK-NEXT: uzp1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uzp1q.nxv8i16(<vscale x 8 x i16> %a,
+                                                                 <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @uzp1_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) nounwind {
+; CHECK-LABEL: uzp1_i32:
+; CHECK-NEXT: uzp1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uzp1q.nxv4i32(<vscale x 4 x i32> %a,
+                                                                 <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @uzp1_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) nounwind {
+; CHECK-LABEL: uzp1_i64:
+; CHECK-NEXT: uzp1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uzp1q.nxv2i64(<vscale x 2 x i64> %a,
+                                                                 <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 8 x half> @uzp1_f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) nounwind {
+; CHECK-LABEL: uzp1_f16:
+; CHECK-NEXT: uzp1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.uzp1q.nxv8f16(<vscale x 8 x half> %a,
+                                                                  <vscale x 8 x half> %b)
+  ret <vscale x 8 x half> %out
+}
+
+define <vscale x 8 x bfloat> @uzp1_bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) nounwind {
+; CHECK-LABEL: uzp1_bf16:
+; CHECK-NEXT: uzp1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.uzp1q.nxv8bf16(<vscale x 8 x bfloat> %a,
+                                                                     <vscale x 8 x bfloat> %b)
+  ret <vscale x 8 x bfloat> %out
+}
+
+define <vscale x 4 x float> @uzp1_f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) nounwind {
+; CHECK-LABEL: uzp1_f32:
+; CHECK-NEXT: uzp1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.uzp1q.nxv4f32(<vscale x 4 x float> %a,
+                                                                   <vscale x 4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @uzp1_f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) nounwind {
+; CHECK-LABEL: uzp1_f64:
+; CHECK-NEXT: uzp1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.uzp1q.nxv2f64(<vscale x 2 x double> %a,
+                                                                    <vscale x 2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
+;
+; UZP2Q
+;
+
+define <vscale x 16 x i8> @uzp2_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) nounwind {
+; CHECK-LABEL: uzp2_i8:
+; CHECK-NEXT: uzp2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.uzp2q.nxv16i8(<vscale x 16 x i8> %a,
+                                                                 <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @uzp2_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) nounwind {
+; CHECK-LABEL: uzp2_i16:
+; CHECK-NEXT: uzp2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.uzp2q.nxv8i16(<vscale x 8 x i16> %a,
+                                                                 <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @uzp2_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) nounwind {
+; CHECK-LABEL: uzp2_i32:
+; CHECK-NEXT: uzp2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.uzp2q.nxv4i32(<vscale x 4 x i32> %a,
+                                                                 <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @uzp2_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) nounwind {
+; CHECK-LABEL: uzp2_i64:
+; CHECK-NEXT: uzp2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.uzp2q.nxv2i64(<vscale x 2 x i64> %a,
+                                                                 <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 8 x half> @uzp2_f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) nounwind {
+; CHECK-LABEL: uzp2_f16:
+; CHECK-NEXT: uzp2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.uzp2q.nxv8f16(<vscale x 8 x half> %a,
+                                                                  <vscale x 8 x half> %b)
+  ret <vscale x 8 x half> %out
+}
+
+define <vscale x 8 x bfloat> @uzp2_bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) nounwind {
+; CHECK-LABEL: uzp2_bf16:
+; CHECK-NEXT: uzp2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.uzp2q.nxv8bf16(<vscale x 8 x bfloat> %a,
+                                                                     <vscale x 8 x bfloat> %b)
+  ret <vscale x 8 x bfloat> %out
+}
+
+define <vscale x 4 x float> @uzp2_f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) nounwind {
+; CHECK-LABEL: uzp2_f32:
+; CHECK-NEXT: uzp2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.uzp2q.nxv4f32(<vscale x 4 x float> %a,
+                                                                   <vscale x 4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @uzp2_f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) nounwind {
+; CHECK-LABEL: uzp2_f64:
+; CHECK-NEXT: uzp2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.uzp2q.nxv2f64(<vscale x 2 x double> %a,
+                                                                    <vscale x 2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
+;
+; ZIP1Q
+;
+
+define <vscale x 16 x i8> @zip1_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) nounwind {
+; CHECK-LABEL: zip1_i8:
+; CHECK-NEXT: zip1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.zip1q.nxv16i8(<vscale x 16 x i8> %a,
+                                                                 <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @zip1_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) nounwind {
+; CHECK-LABEL: zip1_i16:
+; CHECK-NEXT: zip1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.zip1q.nxv8i16(<vscale x 8 x i16> %a,
+                                                                 <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @zip1_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) nounwind {
+; CHECK-LABEL: zip1_i32:
+; CHECK-NEXT: zip1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.zip1q.nxv4i32(<vscale x 4 x i32> %a,
+                                                                 <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @zip1_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) nounwind {
+; CHECK-LABEL: zip1_i64:
+; CHECK-NEXT: zip1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.zip1q.nxv2i64(<vscale x 2 x i64> %a,
+                                                                 <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 8 x half> @zip1_f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) nounwind {
+; CHECK-LABEL: zip1_f16:
+; CHECK-NEXT: zip1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.zip1q.nxv8f16(<vscale x 8 x half> %a,
+                                                                  <vscale x 8 x half> %b)
+  ret <vscale x 8 x half> %out
+}
+
+define <vscale x 8 x bfloat> @zip1_bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) nounwind {
+; CHECK-LABEL: zip1_bf16:
+; CHECK-NEXT: zip1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.zip1q.nxv8bf16(<vscale x 8 x bfloat> %a,
+                                                                     <vscale x 8 x bfloat> %b)
+  ret <vscale x 8 x bfloat> %out
+}
+
+define <vscale x 4 x float> @zip1_f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) nounwind {
+; CHECK-LABEL: zip1_f32:
+; CHECK-NEXT: zip1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.zip1q.nxv4f32(<vscale x 4 x float> %a,
+                                                                   <vscale x 4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @zip1_f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) nounwind {
+; CHECK-LABEL: zip1_f64:
+; CHECK-NEXT: zip1 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.zip1q.nxv2f64(<vscale x 2 x double> %a,
+                                                                    <vscale x 2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
+;
+; ZIP2Q
+;
+
+define <vscale x 16 x i8> @zip2_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) nounwind {
+; CHECK-LABEL: zip2_i8:
+; CHECK-NEXT: zip2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.zip2q.nxv16i8(<vscale x 16 x i8> %a,
+                                                                 <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @zip2_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) nounwind {
+; CHECK-LABEL: zip2_i16:
+; CHECK-NEXT: zip2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.zip2q.nxv8i16(<vscale x 8 x i16> %a,
+                                                                 <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @zip2_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) nounwind {
+; CHECK-LABEL: zip2_i32:
+; CHECK-NEXT: zip2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.zip2q.nxv4i32(<vscale x 4 x i32> %a,
+                                                                 <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @zip2_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) nounwind {
+; CHECK-LABEL: zip2_i64:
+; CHECK-NEXT: zip2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.zip2q.nxv2i64(<vscale x 2 x i64> %a,
+                                                                 <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 8 x half> @zip2_f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) nounwind {
+; CHECK-LABEL: zip2_f16:
+; CHECK-NEXT: zip2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.zip2q.nxv8f16(<vscale x 8 x half> %a,
+                                                                  <vscale x 8 x half> %b)
+  ret <vscale x 8 x half> %out
+}
+
+define <vscale x 8 x bfloat> @zip2_bf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) nounwind {
+; CHECK-LABEL: zip2_bf16:
+; CHECK-NEXT: zip2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.zip2q.nxv8bf16(<vscale x 8 x bfloat> %a,
+                                                                     <vscale x 8 x bfloat> %b)
+  ret <vscale x 8 x bfloat> %out
+}
+
+define <vscale x 4 x float> @zip2_f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) nounwind {
+; CHECK-LABEL: zip2_f32:
+; CHECK-NEXT: zip2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.zip2q.nxv4f32(<vscale x 4 x float> %a,
+                                                                   <vscale x 4 x float> %b)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @zip2_f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) nounwind {
+; CHECK-LABEL: zip2_f64:
+; CHECK-NEXT: zip2 z0.q, z0.q, z1.q
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.zip2q.nxv2f64(<vscale x 2 x double> %a,
+                                                                    <vscale x 2 x double> %b)
+  ret <vscale x 2 x double> %out
+}
+
+
+declare <vscale x 2 x double> @llvm.aarch64.sve.trn1q.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.trn1q.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.trn1q.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.trn1q.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.trn1q.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
+declare <vscale x 8 x half> @llvm.aarch64.sve.trn1q.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.trn1q.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.trn1q.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+
+declare <vscale x 2 x double> @llvm.aarch64.sve.trn2q.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.trn2q.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.trn2q.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.trn2q.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.trn2q.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
+declare <vscale x 8 x half> @llvm.aarch64.sve.trn2q.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.trn2q.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.trn2q.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+
+declare <vscale x 2 x double> @llvm.aarch64.sve.uzp1q.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uzp1q.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.uzp1q.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uzp1q.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.uzp1q.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
+declare <vscale x 8 x half> @llvm.aarch64.sve.uzp1q.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uzp1q.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uzp1q.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+
+declare <vscale x 2 x double> @llvm.aarch64.sve.uzp2q.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.uzp2q.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.uzp2q.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.uzp2q.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.uzp2q.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
+declare <vscale x 8 x half> @llvm.aarch64.sve.uzp2q.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.uzp2q.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.uzp2q.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+
+declare <vscale x 2 x double> @llvm.aarch64.sve.zip1q.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.zip1q.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.zip1q.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.zip1q.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.zip1q.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
+declare <vscale x 8 x half> @llvm.aarch64.sve.zip1q.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.zip1q.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.zip1q.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+
+declare <vscale x 2 x double> @llvm.aarch64.sve.zip2q.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.zip2q.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.zip2q.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.zip2q.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.zip2q.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
+declare <vscale x 8 x half> @llvm.aarch64.sve.zip2q.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.zip2q.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.zip2q.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)