diff --git a/llvm/include/llvm/IR/IntrinsicsPowerPC.td b/llvm/include/llvm/IR/IntrinsicsPowerPC.td
--- a/llvm/include/llvm/IR/IntrinsicsPowerPC.td
+++ b/llvm/include/llvm/IR/IntrinsicsPowerPC.td
@@ -141,6 +141,28 @@
       Intrinsic;
 }
 
+//===----------------------------------------------------------------------===//
+// PowerPC MMA Intrinsic Multi Class Definitions.
+//
+
+multiclass PowerPC_MMA_ACC_Intrinsic<list<LLVMType> args> {
+  def NAME: Intrinsic<[llvm_v512i1_ty], args, [IntrNoMem]>;
+  def pp : Intrinsic<[llvm_v512i1_ty], !listconcat([llvm_v512i1_ty], args),
+                     [IntrNoMem]>;
+  def pn : Intrinsic<[llvm_v512i1_ty], !listconcat([llvm_v512i1_ty], args),
+                     [IntrNoMem]>;
+  def np : Intrinsic<[llvm_v512i1_ty], !listconcat([llvm_v512i1_ty], args),
+                     [IntrNoMem]>;
+  def nn : Intrinsic<[llvm_v512i1_ty], !listconcat([llvm_v512i1_ty], args),
+                     [IntrNoMem]>;
+}
+
+multiclass PowerPC_MMA_ACC_PP_Intrinsic<list<LLVMType> args> {
+  def NAME: Intrinsic<[llvm_v512i1_ty], args, [IntrNoMem]>;
+  def pp : Intrinsic<[llvm_v512i1_ty], !listconcat([llvm_v512i1_ty], args),
+                     [IntrNoMem]>;
+}
+
 //===----------------------------------------------------------------------===//
 // PowerPC Altivec Intrinsic Class Definitions.
 //
@@ -1371,7 +1393,6 @@
 // PowerPC set FPSCR Intrinsic Definitions.
 def int_ppc_setrnd : GCCBuiltin<"__builtin_setrnd">,
       Intrinsic<[llvm_double_ty], [llvm_i32_ty], []>;
-
 }
 
 let TargetPrefix = "ppc" in {
@@ -1400,5 +1421,60 @@
   def int_ppc_mma_xxsetaccz :
     Intrinsic<[llvm_v512i1_ty], [], [IntrNoMem]>;
-}
 
+  // MMA Reduced-Precision: Outer Product Intrinsic Definitions.
+  defm int_ppc_mma_xvi4ger8 :
+    PowerPC_MMA_ACC_PP_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty]>;
+  defm int_ppc_mma_pmxvi4ger8 :
+    PowerPC_MMA_ACC_PP_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty,
+                                  llvm_i32_ty, llvm_i32_ty]>;
+
+  defm int_ppc_mma_xvi8ger4 :
+    PowerPC_MMA_ACC_PP_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty]>;
+  defm int_ppc_mma_pmxvi8ger4 :
+    PowerPC_MMA_ACC_PP_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty,
+                                  llvm_i32_ty, llvm_i32_ty]>;
+
+  defm int_ppc_mma_xvi16ger2s :
+    PowerPC_MMA_ACC_PP_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty]>;
+  defm int_ppc_mma_pmxvi16ger2s :
+    PowerPC_MMA_ACC_PP_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty,
+                                  llvm_i32_ty, llvm_i32_ty]>;
+
+  defm int_ppc_mma_xvf16ger2 :
+    PowerPC_MMA_ACC_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty]>;
+  defm int_ppc_mma_pmxvf16ger2 :
+    PowerPC_MMA_ACC_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty,
+                               llvm_i32_ty, llvm_i32_ty]>;
+  defm int_ppc_mma_xvf32ger :
+    PowerPC_MMA_ACC_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty]>;
+  defm int_ppc_mma_pmxvf32ger :
+    PowerPC_MMA_ACC_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty,
+                               llvm_i32_ty]>;
+  defm int_ppc_mma_xvf64ger :
+    PowerPC_MMA_ACC_Intrinsic<[llvm_v256i1_ty, llvm_v16i8_ty]>;
+  defm int_ppc_mma_pmxvf64ger :
+    PowerPC_MMA_ACC_Intrinsic<[llvm_v256i1_ty, llvm_v16i8_ty, llvm_i32_ty,
+                               llvm_i32_ty]>;
+
+  // MMA Reduced-Precision: bfloat16 Outer Product Intrinsic Definitions.
+  defm int_ppc_mma_xvbf16ger2 :
+    PowerPC_MMA_ACC_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty]>;
+  defm int_ppc_mma_pmxvbf16ger2 :
+    PowerPC_MMA_ACC_Intrinsic<
+      [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty]>;
+
+  // MMA Reduced-Precision: Missing Integer-based Outer Product Operations.
+  defm int_ppc_mma_xvi16ger2 :
+    PowerPC_MMA_ACC_PP_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty]>;
+  defm int_ppc_mma_pmxvi16ger2 :
+    PowerPC_MMA_ACC_PP_Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty,
+                                  llvm_i32_ty, llvm_i32_ty]>;
+  def int_ppc_mma_xvi8ger4spp :
+    Intrinsic<[llvm_v512i1_ty],
+              [llvm_v512i1_ty, llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+  def int_ppc_mma_pmxvi8ger4spp :
+    Intrinsic<[llvm_v512i1_ty],
+              [llvm_v512i1_ty, llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty,
+               llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+}
diff --git a/llvm/lib/Target/PowerPC/PPCInstrPrefix.td b/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
--- a/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
@@ -1,3 +1,8 @@
+// Mask immediates for MMA instructions (2, 4 and 8 bits).
+def Msk2Imm : ImmLeaf<i32, [{ return isUInt<2>(Imm); }]>;
+def Msk4Imm : ImmLeaf<i32, [{ return isUInt<4>(Imm); }]>;
+def Msk8Imm : ImmLeaf<i32, [{ return isUInt<8>(Imm); }]>;
+
 //===----------------------------------------------------------------------===//
 // PowerPC ISA 3.1 specific type constraints.
 //
@@ -1341,6 +1346,220 @@
                                        "xvf64ger", "$AT, $XA, $XB">;
 
 //------------------------------------------------------------------------------
+// MMA Intrinsics
+let Predicates = [MMA] in {
+  def : Pat<(v512i1 (int_ppc_mma_xvi4ger8 v16i8:$XA, v16i8:$XB)),
+            (XVI4GER8 RCCp.AToVSRC, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvi4ger8pp v512i1:$ATi, v16i8:$XA, v16i8:$XB)),
+            (XVI4GER8PP $ATi, RCCp.AToVSRC, RCCp.BToVSRC)>;
+
+  def : Pat<(v512i1 (int_ppc_mma_xvi8ger4 v16i8:$XA, v16i8:$XB)),
+            (XVI8GER4 RCCp.AToVSRC, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvi8ger4pp v512i1:$ATi, v16i8:$XA, v16i8:$XB)),
+            (XVI8GER4PP $ATi, RCCp.AToVSRC, RCCp.BToVSRC)>;
+
+  def : Pat<(v512i1 (int_ppc_mma_xvi16ger2s v16i8:$XA, v16i8:$XB)),
+            (XVI16GER2S RCCp.AToVSRC, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvi16ger2spp v512i1:$ATi, v16i8:$XA, v16i8:$XB)),
+            (XVI16GER2SPP $ATi, RCCp.AToVSRC, RCCp.BToVSRC)>;
+
+  def : Pat<(v512i1 (int_ppc_mma_xvf16ger2 v16i8:$XA, v16i8:$XB)),
+            (XVF16GER2 RCCp.AToVSRC, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvf16ger2pp v512i1:$ATi, v16i8:$XA, v16i8:$XB)),
+            (XVF16GER2PP $ATi, RCCp.AToVSRC, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvf16ger2pn v512i1:$ATi, v16i8:$XA, v16i8:$XB)),
+            (XVF16GER2PN $ATi, RCCp.AToVSRC, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvf16ger2np v512i1:$ATi, v16i8:$XA, v16i8:$XB)),
+            (XVF16GER2NP $ATi, RCCp.AToVSRC, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvf16ger2nn v512i1:$ATi, v16i8:$XA, v16i8:$XB)),
+            (XVF16GER2NN $ATi, RCCp.AToVSRC, RCCp.BToVSRC)>;
+
+  def : Pat<(v512i1 (int_ppc_mma_xvf32ger v16i8:$XA, v16i8:$XB)),
+            (XVF32GER RCCp.AToVSRC, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvf32gerpp v512i1:$ATi, v16i8:$XA, v16i8:$XB)),
+            (XVF32GERPP $ATi, RCCp.AToVSRC, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvf32gerpn v512i1:$ATi, v16i8:$XA, v16i8:$XB)),
+            (XVF32GERPN $ATi, RCCp.AToVSRC, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvf32gernp v512i1:$ATi, v16i8:$XA, v16i8:$XB)),
+            (XVF32GERNP $ATi, RCCp.AToVSRC, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvf32gernn v512i1:$ATi, v16i8:$XA, v16i8:$XB)),
+            (XVF32GERNN $ATi, RCCp.AToVSRC, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvf64ger v256i1:$XA, v16i8:$XB)),
+            (XVF64GER $XA, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvf64gerpp v512i1:$ATi, v256i1:$XA, v16i8:$XB)),
+            (XVF64GERPP $ATi, $XA, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvf64gerpn v512i1:$ATi, v256i1:$XA, v16i8:$XB)),
+            (XVF64GERPN $ATi, $XA, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvf64gernp v512i1:$ATi, v256i1:$XA, v16i8:$XB)),
+            (XVF64GERNP $ATi, $XA, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvf64gernn v512i1:$ATi, v256i1:$XA, v16i8:$XB)),
+            (XVF64GERNN $ATi, $XA, RCCp.BToVSRC)>;
+
+  def : Pat<(v512i1 (int_ppc_mma_xvbf16ger2 v16i8:$XA, v16i8:$XB)),
+            (XVBF16GER2 RCCp.AToVSRC, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvbf16ger2pp v512i1:$ATi, v16i8:$XA, v16i8:$XB)),
+            (XVBF16GER2PP $ATi, RCCp.AToVSRC, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvbf16ger2pn v512i1:$ATi, v16i8:$XA, v16i8:$XB)),
+            (XVBF16GER2PN $ATi, RCCp.AToVSRC, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvbf16ger2np v512i1:$ATi, v16i8:$XA, v16i8:$XB)),
+            (XVBF16GER2NP $ATi, RCCp.AToVSRC, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvbf16ger2nn v512i1:$ATi, v16i8:$XA, v16i8:$XB)),
+            (XVBF16GER2NN $ATi, RCCp.AToVSRC, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvi16ger2 v16i8:$XA, v16i8:$XB)),
+            (XVI16GER2 RCCp.AToVSRC, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvi16ger2pp v512i1:$ATi, v16i8:$XA, v16i8:$XB)),
+            (XVI16GER2PP $ATi, RCCp.AToVSRC, RCCp.BToVSRC)>;
+  def : Pat<(v512i1 (int_ppc_mma_xvi8ger4spp v512i1:$ATi, v16i8:$XA, v16i8:$XB)),
+            (XVI8GER4SPP $ATi, RCCp.AToVSRC, RCCp.BToVSRC)>;
+}
+
+// MMA Intrinsics
+let Predicates = [MMA, PrefixInstrs] in {
+  def : Pat<(v512i1 (int_ppc_mma_pmxvi4ger8 v16i8:$XA, v16i8:$XB, Msk4Imm:$XMSK,
+                                            Msk4Imm:$YMSK, Msk8Imm:$PMSK)),
+            (PMXVI4GER8 RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                        Msk4Imm:$YMSK, Msk8Imm:$PMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvi4ger8pp v512i1:$ATi, v16i8:$XA, v16i8:$XB,
+                                              Msk4Imm:$XMSK, Msk4Imm:$YMSK,
+                                              Msk8Imm:$PMSK)),
+            (PMXVI4GER8PP $ATi, RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                          Msk4Imm:$YMSK, Msk8Imm:$PMSK)>;
+
+  def : Pat<(v512i1 (int_ppc_mma_pmxvi8ger4 v16i8:$XA, v16i8:$XB, Msk4Imm:$XMSK,
+                                            Msk4Imm:$YMSK, Msk4Imm:$PMSK)),
+            (PMXVI8GER4 RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                        Msk4Imm:$YMSK, Msk4Imm:$PMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvi8ger4pp v512i1:$ATi, v16i8:$XA, v16i8:$XB,
+                                              Msk4Imm:$XMSK, Msk4Imm:$YMSK,
+                                              Msk4Imm:$PMSK)),
+            (PMXVI8GER4PP $ATi, RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                          Msk4Imm:$YMSK, Msk4Imm:$PMSK)>;
+
+  def : Pat<(v512i1 (int_ppc_mma_pmxvi16ger2s v16i8:$XA, v16i8:$XB, Msk4Imm:$XMSK,
+                                              Msk4Imm:$YMSK, Msk2Imm:$PMSK)),
+            (PMXVI16GER2S RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                          Msk4Imm:$YMSK, Msk2Imm:$PMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvi16ger2spp v512i1:$ATi, v16i8:$XA, v16i8:$XB,
+                                                Msk4Imm:$XMSK, Msk4Imm:$YMSK,
+                                                Msk2Imm:$PMSK)),
+            (PMXVI16GER2SPP $ATi, RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                            Msk4Imm:$YMSK, Msk2Imm:$PMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvf16ger2 v16i8:$XA, v16i8:$XB, Msk4Imm:$XMSK,
+                                             Msk4Imm:$YMSK, Msk2Imm:$PMSK)),
+            (PMXVF16GER2 RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                         Msk4Imm:$YMSK, Msk2Imm:$PMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvf16ger2pp v512i1:$ATi, v16i8:$XA, v16i8:$XB,
+                                               Msk4Imm:$XMSK, Msk4Imm:$YMSK,
+                                               Msk2Imm:$PMSK)),
+            (PMXVF16GER2PP $ATi, RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                           Msk4Imm:$YMSK, Msk2Imm:$PMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvf16ger2pn v512i1:$ATi, v16i8:$XA, v16i8:$XB,
+                                               Msk4Imm:$XMSK, Msk4Imm:$YMSK,
+                                               Msk2Imm:$PMSK)),
+            (PMXVF16GER2PN $ATi, RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                           Msk4Imm:$YMSK, Msk2Imm:$PMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvf16ger2np v512i1:$ATi, v16i8:$XA, v16i8:$XB,
+                                               Msk4Imm:$XMSK, Msk4Imm:$YMSK,
+                                               Msk2Imm:$PMSK)),
+            (PMXVF16GER2NP $ATi, RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                           Msk4Imm:$YMSK, Msk2Imm:$PMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvf16ger2nn v512i1:$ATi, v16i8:$XA, v16i8:$XB,
+                                               Msk4Imm:$XMSK, Msk4Imm:$YMSK,
+                                               Msk2Imm:$PMSK)),
+            (PMXVF16GER2NN $ATi, RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                           Msk4Imm:$YMSK, Msk2Imm:$PMSK)>;
+
+  def : Pat<(v512i1 (int_ppc_mma_pmxvf32ger v16i8:$XA, v16i8:$XB, Msk4Imm:$XMSK,
+                                            Msk4Imm:$YMSK)),
+            (PMXVF32GER RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                        Msk4Imm:$YMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvf32gerpp v512i1:$ATi, v16i8:$XA, v16i8:$XB,
+                                              Msk4Imm:$XMSK, Msk4Imm:$YMSK)),
+            (PMXVF32GERPP $ATi, RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                          Msk4Imm:$YMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvf32gerpn v512i1:$ATi, v16i8:$XA, v16i8:$XB,
+                                              Msk4Imm:$XMSK, Msk4Imm:$YMSK)),
+            (PMXVF32GERPN $ATi, RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                          Msk4Imm:$YMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvf32gernp v512i1:$ATi, v16i8:$XA, v16i8:$XB,
+                                              Msk4Imm:$XMSK, Msk4Imm:$YMSK)),
+            (PMXVF32GERNP $ATi, RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                          Msk4Imm:$YMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvf32gernn v512i1:$ATi, v16i8:$XA, v16i8:$XB,
+                                              Msk4Imm:$XMSK, Msk4Imm:$YMSK)),
+            (PMXVF32GERNN $ATi, RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                          Msk4Imm:$YMSK)>;
+
+  def : Pat<(v512i1 (int_ppc_mma_pmxvf64ger v256i1:$XA, v16i8:$XB, Msk4Imm:$XMSK,
+                                            Msk2Imm:$YMSK)),
+            (PMXVF64GER $XA, RCCp.BToVSRC, Msk4Imm:$XMSK, Msk2Imm:$YMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvf64gerpp v512i1:$ATi, v256i1:$XA, v16i8:$XB,
+                                              Msk4Imm:$XMSK, Msk2Imm:$YMSK)),
+            (PMXVF64GERPP $ATi, $XA, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                          Msk2Imm:$YMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvf64gerpn v512i1:$ATi, v256i1:$XA, v16i8:$XB,
+                                              Msk4Imm:$XMSK, Msk2Imm:$YMSK)),
+            (PMXVF64GERPN $ATi, $XA, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                          Msk2Imm:$YMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvf64gernp v512i1:$ATi, v256i1:$XA, v16i8:$XB,
+                                              Msk4Imm:$XMSK, Msk2Imm:$YMSK)),
+            (PMXVF64GERNP $ATi, $XA, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                          Msk2Imm:$YMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvf64gernn v512i1:$ATi, v256i1:$XA, v16i8:$XB,
+                                              Msk4Imm:$XMSK, Msk2Imm:$YMSK)),
+            (PMXVF64GERNN $ATi, $XA, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                          Msk2Imm:$YMSK)>;
+
+  def : Pat<(v512i1 (int_ppc_mma_pmxvbf16ger2 v16i8:$XA, v16i8:$XB, Msk4Imm:$XMSK,
+                                              Msk4Imm:$YMSK, Msk2Imm:$PMSK)),
+            (PMXVBF16GER2 RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                          Msk4Imm:$YMSK, Msk2Imm:$PMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvbf16ger2pp v512i1:$ATi, v16i8:$XA, v16i8:$XB,
+                                                Msk4Imm:$XMSK, Msk4Imm:$YMSK,
+                                                Msk2Imm:$PMSK)),
+            (PMXVBF16GER2PP $ATi, RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                            Msk4Imm:$YMSK, Msk2Imm:$PMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvbf16ger2pn v512i1:$ATi, v16i8:$XA, v16i8:$XB,
+                                                Msk4Imm:$XMSK, Msk4Imm:$YMSK,
+                                                Msk2Imm:$PMSK)),
+            (PMXVBF16GER2PN $ATi, RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                            Msk4Imm:$YMSK, Msk2Imm:$PMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvbf16ger2np v512i1:$ATi, v16i8:$XA, v16i8:$XB,
+                                                Msk4Imm:$XMSK, Msk4Imm:$YMSK,
+                                                Msk2Imm:$PMSK)),
+            (PMXVBF16GER2NP $ATi, RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                            Msk4Imm:$YMSK, Msk2Imm:$PMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvbf16ger2nn v512i1:$ATi, v16i8:$XA, v16i8:$XB,
+                                                Msk4Imm:$XMSK, Msk4Imm:$YMSK,
+                                                Msk2Imm:$PMSK)),
+            (PMXVBF16GER2NN $ATi, RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                            Msk4Imm:$YMSK, Msk2Imm:$PMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvi16ger2 v16i8:$XA, v16i8:$XB, Msk4Imm:$XMSK,
+                                             Msk4Imm:$YMSK, Msk2Imm:$PMSK)),
+            (PMXVI16GER2 RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                         Msk4Imm:$YMSK, Msk2Imm:$PMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvi8ger4spp v512i1:$ATi, v16i8:$XA, v16i8:$XB,
+                                               Msk4Imm:$XMSK, Msk4Imm:$YMSK,
+                                               Msk4Imm:$PMSK)),
+            (PMXVI8GER4SPP $ATi, RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                           Msk4Imm:$YMSK, Msk4Imm:$PMSK)>;
+  def : Pat<(v512i1 (int_ppc_mma_pmxvi16ger2pp v512i1:$ATi, v16i8:$XA, v16i8:$XB,
+                                               Msk4Imm:$XMSK, Msk4Imm:$YMSK,
+                                               Msk2Imm:$PMSK)),
+            (PMXVI16GER2PP $ATi, RCCp.AToVSRC, RCCp.BToVSRC, Msk4Imm:$XMSK,
+                           Msk4Imm:$YMSK, Msk2Imm:$PMSK)>;
+}
+
 def Concats {
   dag VecsToVecPair0 =
     (v256i1 (INSERT_SUBREG
diff --git a/llvm/test/CodeGen/PowerPC/bfloat16-outer-product.ll b/llvm/test/CodeGen/PowerPC/bfloat16-outer-product.ll
--- a/llvm/test/CodeGen/PowerPC/bfloat16-outer-product.ll
+++ b/llvm/test/CodeGen/PowerPC/bfloat16-outer-product.ll
@@ -6,6 +6,412 @@
 ; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names \
 ; RUN:   -ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=CHECK-BE
 
+; Function Attrs: nofree nounwind writeonly
+define dso_local void @test50(i8* nocapture readnone %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) {
+; CHECK-LABEL: test50:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xvbf16ger2 acc0, v2, v2
+; CHECK-NEXT:    xxmfacc acc0
+; CHECK-NEXT:    stxv vs0, 48(r7)
+; CHECK-NEXT:    stxv vs1, 32(r7)
+; CHECK-NEXT:    stxv vs2, 16(r7)
+; CHECK-NEXT:    stxv vs3, 0(r7)
+; CHECK-NEXT:    blr
+;
+; CHECK-BE-LABEL: test50:
+; CHECK-BE:       # %bb.0: # %entry
+; CHECK-BE-NEXT:    xvbf16ger2 acc0, v2, v2
+; CHECK-BE-NEXT:    xxmfacc acc0
+; CHECK-BE-NEXT:    stxv vs1, 16(r7)
+; CHECK-BE-NEXT:    stxv vs0, 0(r7)
+; CHECK-BE-NEXT:    stxv vs3, 48(r7)
+; CHECK-BE-NEXT:    stxv vs2, 32(r7)
+; CHECK-BE-NEXT:    blr
+entry:
+  %0 = tail call <512 x i1> @llvm.ppc.mma.xvbf16ger2(<16 x i8> %vc, <16 x i8> %vc)
+  %1 = bitcast i8* %resp to <512 x i1>*
+  store <512 x i1> %0, <512 x i1>* %1, align 64
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <512 x i1> @llvm.ppc.mma.xvbf16ger2(<16 x i8>, <16 x i8>)
+
+; Function Attrs: nofree nounwind writeonly
+define dso_local void @test51(i8* nocapture readnone %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) {
+; CHECK-LABEL: test51:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pmxvbf16ger2 acc0, v2, v2, 0, 0, 0
+; CHECK-NEXT:    xxmfacc acc0
+; CHECK-NEXT:    stxv vs0, 48(r7)
+; CHECK-NEXT:    stxv vs1, 32(r7)
+; CHECK-NEXT:    stxv vs2, 16(r7)
+; CHECK-NEXT:    stxv vs3, 0(r7)
+; CHECK-NEXT:    blr
+;
+; CHECK-BE-LABEL: test51:
+; CHECK-BE:       # %bb.0: # %entry
+; CHECK-BE-NEXT:    pmxvbf16ger2 acc0, v2, v2, 0, 0, 0
+; CHECK-BE-NEXT:    xxmfacc acc0
+; CHECK-BE-NEXT:    stxv vs1, 16(r7)
+; CHECK-BE-NEXT:    stxv vs0, 0(r7)
+; CHECK-BE-NEXT:    stxv vs3, 48(r7)
+; CHECK-BE-NEXT:    stxv vs2, 32(r7)
+; CHECK-BE-NEXT:    blr
+entry:
+  %0 = tail call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2(<16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0)
+  %1 = bitcast i8* %resp to <512 x i1>*
+  store <512 x i1> %0, <512 x i1>* %1, align 64
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <512 x i1> @llvm.ppc.mma.pmxvbf16ger2(<16 x i8>, <16 x i8>, i32, i32, i32)
+
+; Function Attrs: nofree nounwind
+define dso_local void
@test52(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) { +; CHECK-LABEL: test52: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lxv vs1, 32(r3) +; CHECK-NEXT: lxv vs0, 48(r3) +; CHECK-NEXT: lxv vs3, 0(r3) +; CHECK-NEXT: lxv vs2, 16(r3) +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: xvbf16ger2pp acc0, v2, v2 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test52: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: lxv vs1, 16(r3) +; CHECK-BE-NEXT: lxv vs0, 0(r3) +; CHECK-BE-NEXT: lxv vs3, 48(r3) +; CHECK-BE-NEXT: lxv vs2, 32(r3) +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: xvbf16ger2pp acc0, v2, v2 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = bitcast i8* %vqp to <512 x i1>* + %1 = load <512 x i1>, <512 x i1>* %0, align 64 + %2 = tail call <512 x i1> @llvm.ppc.mma.xvbf16ger2pp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc) + %3 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %2, <512 x i1>* %3, align 64 + ret void +} + +; Function Attrs: nounwind readnone +declare <512 x i1> @llvm.ppc.mma.xvbf16ger2pp(<512 x i1>, <16 x i8>, <16 x i8>) + +; Function Attrs: nofree nounwind +define dso_local void @test53(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) { +; CHECK-LABEL: test53: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lxv vs1, 32(r3) +; CHECK-NEXT: lxv vs0, 48(r3) +; CHECK-NEXT: lxv vs3, 0(r3) +; CHECK-NEXT: lxv vs2, 16(r3) +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: xvbf16ger2pn acc0, v2, v2 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test53: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: lxv vs1, 16(r3) +; CHECK-BE-NEXT: lxv vs0, 0(r3) +; CHECK-BE-NEXT: lxv vs3, 48(r3) +; CHECK-BE-NEXT: lxv vs2, 32(r3) +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: xvbf16ger2pn acc0, v2, v2 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = bitcast i8* %vqp to <512 x i1>* + %1 = load <512 x i1>, <512 x i1>* %0, align 64 + %2 = tail call <512 x i1> @llvm.ppc.mma.xvbf16ger2pn(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc) + %3 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %2, <512 x i1>* %3, align 64 + ret void +} + +; Function Attrs: nounwind readnone +declare <512 x i1> @llvm.ppc.mma.xvbf16ger2pn(<512 x i1>, <16 x i8>, <16 x i8>) + +; Function Attrs: nofree nounwind +define dso_local void @test54(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) { +; CHECK-LABEL: test54: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lxv vs1, 32(r3) +; CHECK-NEXT: lxv vs0, 48(r3) +; CHECK-NEXT: lxv vs3, 0(r3) +; CHECK-NEXT: lxv vs2, 16(r3) +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: xvbf16ger2np acc0, v2, v2 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test54: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: lxv vs1, 16(r3) +; 
CHECK-BE-NEXT: lxv vs0, 0(r3) +; CHECK-BE-NEXT: lxv vs3, 48(r3) +; CHECK-BE-NEXT: lxv vs2, 32(r3) +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: xvbf16ger2np acc0, v2, v2 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = bitcast i8* %vqp to <512 x i1>* + %1 = load <512 x i1>, <512 x i1>* %0, align 64 + %2 = tail call <512 x i1> @llvm.ppc.mma.xvbf16ger2np(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc) + %3 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %2, <512 x i1>* %3, align 64 + ret void +} + +; Function Attrs: nounwind readnone +declare <512 x i1> @llvm.ppc.mma.xvbf16ger2np(<512 x i1>, <16 x i8>, <16 x i8>) + +; Function Attrs: nofree nounwind +define dso_local void @test55(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) { +; CHECK-LABEL: test55: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lxv vs1, 32(r3) +; CHECK-NEXT: lxv vs0, 48(r3) +; CHECK-NEXT: lxv vs3, 0(r3) +; CHECK-NEXT: lxv vs2, 16(r3) +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: xvbf16ger2nn acc0, v2, v2 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test55: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: lxv vs1, 16(r3) +; CHECK-BE-NEXT: lxv vs0, 0(r3) +; CHECK-BE-NEXT: lxv vs3, 48(r3) +; CHECK-BE-NEXT: lxv vs2, 32(r3) +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: xvbf16ger2nn acc0, v2, v2 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = bitcast i8* %vqp to <512 x i1>* + %1 = load <512 x i1>, <512 x i1>* %0, align 64 + %2 = tail call <512 x i1> @llvm.ppc.mma.xvbf16ger2nn(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc) + %3 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %2, <512 x i1>* %3, align 64 + ret void +} + +; Function Attrs: nounwind readnone +declare <512 x i1> @llvm.ppc.mma.xvbf16ger2nn(<512 x i1>, <16 x i8>, <16 x i8>) + +; Function Attrs: nofree nounwind +define dso_local void @test56(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) { +; CHECK-LABEL: test56: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lxv vs1, 32(r3) +; CHECK-NEXT: lxv vs0, 48(r3) +; CHECK-NEXT: lxv vs3, 0(r3) +; CHECK-NEXT: lxv vs2, 16(r3) +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: pmxvbf16ger2pp acc0, v2, v2, 0, 0, 0 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test56: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: lxv vs1, 16(r3) +; CHECK-BE-NEXT: lxv vs0, 0(r3) +; CHECK-BE-NEXT: lxv vs3, 48(r3) +; CHECK-BE-NEXT: lxv vs2, 32(r3) +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: pmxvbf16ger2pp acc0, v2, v2, 0, 0, 0 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = bitcast i8* %vqp to <512 x i1>* + %1 = load <512 x i1>, <512 x i1>* %0, align 64 + %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2pp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0) + %3 = bitcast i8* 
%resp to <512 x i1>* + store <512 x i1> %2, <512 x i1>* %3, align 64 + ret void +} + +; Function Attrs: nounwind readnone +declare <512 x i1> @llvm.ppc.mma.pmxvbf16ger2pp(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32, i32) + +; Function Attrs: nofree nounwind +define dso_local void @test57(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) { +; CHECK-LABEL: test57: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lxv vs1, 32(r3) +; CHECK-NEXT: lxv vs0, 48(r3) +; CHECK-NEXT: lxv vs3, 0(r3) +; CHECK-NEXT: lxv vs2, 16(r3) +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: pmxvbf16ger2pn acc0, v2, v2, 0, 0, 0 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test57: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: lxv vs1, 16(r3) +; CHECK-BE-NEXT: lxv vs0, 0(r3) +; CHECK-BE-NEXT: lxv vs3, 48(r3) +; CHECK-BE-NEXT: lxv vs2, 32(r3) +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: pmxvbf16ger2pn acc0, v2, v2, 0, 0, 0 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = bitcast i8* %vqp to <512 x i1>* + %1 = load <512 x i1>, <512 x i1>* %0, align 64 + %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2pn(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0) + %3 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %2, <512 x i1>* %3, align 64 + ret void +} + +; Function Attrs: nounwind readnone +declare <512 x i1> @llvm.ppc.mma.pmxvbf16ger2pn(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32, i32) + +; Function Attrs: nofree nounwind +define dso_local void @test58(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) { +; CHECK-LABEL: test58: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lxv vs1, 32(r3) +; CHECK-NEXT: lxv vs0, 48(r3) +; CHECK-NEXT: lxv vs3, 0(r3) +; CHECK-NEXT: lxv vs2, 16(r3) +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: pmxvbf16ger2np acc0, v2, v2, 0, 0, 0 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test58: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: lxv vs1, 16(r3) +; CHECK-BE-NEXT: lxv vs0, 0(r3) +; CHECK-BE-NEXT: lxv vs3, 48(r3) +; CHECK-BE-NEXT: lxv vs2, 32(r3) +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: pmxvbf16ger2np acc0, v2, v2, 0, 0, 0 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = bitcast i8* %vqp to <512 x i1>* + %1 = load <512 x i1>, <512 x i1>* %0, align 64 + %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2np(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0) + %3 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %2, <512 x i1>* %3, align 64 + ret void +} + +; Function Attrs: nounwind readnone +declare <512 x i1> @llvm.ppc.mma.pmxvbf16ger2np(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32, i32) + +; Function Attrs: nofree nounwind +define dso_local void @test59(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) { +; CHECK-LABEL: test59: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lxv vs1, 32(r3) +; CHECK-NEXT: lxv vs0, 48(r3) +; 
CHECK-NEXT: lxv vs3, 0(r3) +; CHECK-NEXT: lxv vs2, 16(r3) +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: pmxvbf16ger2nn acc0, v2, v2, 0, 0, 0 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test59: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: lxv vs1, 16(r3) +; CHECK-BE-NEXT: lxv vs0, 0(r3) +; CHECK-BE-NEXT: lxv vs3, 48(r3) +; CHECK-BE-NEXT: lxv vs2, 32(r3) +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: pmxvbf16ger2nn acc0, v2, v2, 0, 0, 0 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = bitcast i8* %vqp to <512 x i1>* + %1 = load <512 x i1>, <512 x i1>* %0, align 64 + %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvbf16ger2nn(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0) + %3 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %2, <512 x i1>* %3, align 64 + ret void +} + +; Function Attrs: nounwind readnone +declare <512 x i1> @llvm.ppc.mma.pmxvbf16ger2nn(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32, i32) + ; Function Attrs: nofree nounwind writeonly define dso_local void @test60(i8* nocapture readnone %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) { ; CHECK-LABEL: test60: @@ -25,6 +431,7 @@ store <16 x i8> %0, <16 x i8>* %1, align 16 ret void } + ; Function Attrs: nounwind readnone declare <16 x i8> @llvm.ppc.vsx.xvcvspbf16(<16 x i8>) diff --git a/llvm/test/CodeGen/PowerPC/mma-integer-based-outer-product.ll b/llvm/test/CodeGen/PowerPC/mma-integer-based-outer-product.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/PowerPC/mma-integer-based-outer-product.ll @@ -0,0 +1,241 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \ +; RUN: -mcpu=pwr10 -ppc-asm-full-reg-names \ +; RUN: -ppc-vsr-nums-as-vr < %s | FileCheck %s +; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu \ +; RUN: -mcpu=pwr10 -ppc-asm-full-reg-names \ +; RUN: -ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=CHECK-BE + +; Function Attrs: nofree nounwind writeonly +define dso_local void @test1(i8* nocapture readnone %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) { +; CHECK-LABEL: test1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvi16ger2 acc0, v2, v2 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test1: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: xvi16ger2 acc0, v2, v2 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = tail call <512 x i1> @llvm.ppc.mma.xvi16ger2(<16 x i8> %vc, <16 x i8> %vc) + %1 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %0, <512 x i1>* %1, align 64 + ret void +} + +; Function Attrs: nounwind readnone +declare <512 x i1> @llvm.ppc.mma.xvi16ger2(<16 x i8>, <16 x i8>) + +; Function Attrs: nofree nounwind writeonly +define dso_local void @test2(i8* nocapture readnone %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) { +; CHECK-LABEL: test2: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: pmxvi16ger2 acc0, v2, v2, 0, 0, 0 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test2: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: pmxvi16ger2 acc0, v2, v2, 0, 0, 0 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = tail call <512 x i1> @llvm.ppc.mma.pmxvi16ger2(<16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0) + %1 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %0, <512 x i1>* %1, align 64 + ret void +} + +; Function Attrs: nounwind readnone +declare <512 x i1> @llvm.ppc.mma.pmxvi16ger2(<16 x i8>, <16 x i8>, i32, i32, i32) + +; Function Attrs: nofree nounwind +define dso_local void @test3(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) { +; CHECK-LABEL: test3: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lxv vs1, 32(r3) +; CHECK-NEXT: lxv vs0, 48(r3) +; CHECK-NEXT: lxv vs3, 0(r3) +; CHECK-NEXT: lxv vs2, 16(r3) +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: xvi8ger4spp acc0, v2, v2 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test3: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: lxv vs1, 16(r3) +; CHECK-BE-NEXT: lxv vs0, 0(r3) +; CHECK-BE-NEXT: lxv vs3, 48(r3) +; CHECK-BE-NEXT: lxv vs2, 32(r3) +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: xvi8ger4spp acc0, v2, v2 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = bitcast i8* %vqp to <512 x i1>* + %1 = load <512 x i1>, <512 x i1>* %0, align 64 + %2 = tail call <512 x i1> @llvm.ppc.mma.xvi8ger4spp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc) + %3 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %2, <512 x i1>* %3, align 64 + ret void +} + +; Function Attrs: nounwind readnone +declare <512 x i1> @llvm.ppc.mma.xvi8ger4spp(<512 x i1>, <16 x i8>, <16 x i8>) + +; Function Attrs: nofree nounwind +define dso_local void @test4(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) { +; CHECK-LABEL: test4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lxv vs1, 32(r3) +; CHECK-NEXT: lxv vs0, 48(r3) +; CHECK-NEXT: lxv vs3, 0(r3) +; CHECK-NEXT: lxv vs2, 16(r3) +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: xvi16ger2pp acc0, v2, v2 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test4: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: lxv vs1, 16(r3) +; CHECK-BE-NEXT: lxv vs0, 0(r3) +; CHECK-BE-NEXT: lxv vs3, 48(r3) +; CHECK-BE-NEXT: lxv vs2, 32(r3) +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: xvi16ger2pp acc0, v2, v2 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = bitcast i8* %vqp to <512 x i1>* + %1 = load <512 x i1>, <512 x i1>* %0, align 64 + %2 = tail call <512 x i1> @llvm.ppc.mma.xvi16ger2pp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc) + 
%3 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %2, <512 x i1>* %3, align 64 + ret void +} + +; Function Attrs: nounwind readnone +declare <512 x i1> @llvm.ppc.mma.xvi16ger2pp(<512 x i1>, <16 x i8>, <16 x i8>) + +; Function Attrs: nofree nounwind +define dso_local void @test5(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) { +; CHECK-LABEL: test5: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lxv vs1, 32(r3) +; CHECK-NEXT: lxv vs0, 48(r3) +; CHECK-NEXT: lxv vs3, 0(r3) +; CHECK-NEXT: lxv vs2, 16(r3) +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: pmxvi8ger4spp acc0, v2, v2, 0, 0, 0 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test5: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: lxv vs1, 16(r3) +; CHECK-BE-NEXT: lxv vs0, 0(r3) +; CHECK-BE-NEXT: lxv vs3, 48(r3) +; CHECK-BE-NEXT: lxv vs2, 32(r3) +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: pmxvi8ger4spp acc0, v2, v2, 0, 0, 0 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = bitcast i8* %vqp to <512 x i1>* + %1 = load <512 x i1>, <512 x i1>* %0, align 64 + %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvi8ger4spp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0) + %3 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %2, <512 x i1>* %3, align 64 + ret void +} + +; Function Attrs: nounwind readnone +declare <512 x i1> @llvm.ppc.mma.pmxvi8ger4spp(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32, i32) + +; Function Attrs: nofree nounwind +define dso_local void @test6(i8* nocapture readonly %vqp, i8* nocapture readnone %vpp, <16 x i8> %vc, i8* nocapture %resp) { +; CHECK-LABEL: test6: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lxv vs1, 32(r3) +; CHECK-NEXT: lxv vs0, 48(r3) +; CHECK-NEXT: lxv vs3, 0(r3) +; CHECK-NEXT: lxv vs2, 16(r3) +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: pmxvi16ger2pp acc0, v2, v2, 0, 0, 0 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test6: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: lxv vs1, 16(r3) +; CHECK-BE-NEXT: lxv vs0, 0(r3) +; CHECK-BE-NEXT: lxv vs3, 48(r3) +; CHECK-BE-NEXT: lxv vs2, 32(r3) +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: pmxvi16ger2pp acc0, v2, v2, 0, 0, 0 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = bitcast i8* %vqp to <512 x i1>* + %1 = load <512 x i1>, <512 x i1>* %0, align 64 + %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvi16ger2pp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0) + %3 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %2, <512 x i1>* %3, align 64 + ret void +} + +; Function Attrs: nounwind readnone +declare <512 x i1> @llvm.ppc.mma.pmxvi16ger2pp(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32, i32) diff --git a/llvm/test/CodeGen/PowerPC/mma-outer-product.ll b/llvm/test/CodeGen/PowerPC/mma-outer-product.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/PowerPC/mma-outer-product.ll @@ -0,0 +1,1822 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; 
RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \ +; RUN: -mcpu=pwr10 -ppc-asm-full-reg-names \ +; RUN: -ppc-vsr-nums-as-vr < %s | FileCheck %s +; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu \ +; RUN: -mcpu=pwr10 -ppc-asm-full-reg-names \ +; RUN: -ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=CHECK-BE + +declare <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>) +declare <256 x i1> @llvm.ppc.mma.assemble.pair(<16 x i8>, <16 x i8>) +define void @intrinsics1(<16 x i8> %vc1, <16 x i8> %vc2, <16 x i8> %vc3, <16 x i8> %vc4, i8* %ptr) { +; CHECK-LABEL: intrinsics1: +; CHECK: # %bb.0: +; CHECK-NEXT: # kill: def $v5 killed $v5 killed $vsrp18 def $vsrp18 +; CHECK-NEXT: vmr v1, v4 +; CHECK-NEXT: vmr v4, v3 +; CHECK-NEXT: ld r3, 96(r1) +; CHECK-NEXT: vmr v0, v2 +; CHECK-NEXT: xxlor vs5, v2, v2 +; CHECK-NEXT: xxlor vs4, v5, v5 +; CHECK-NEXT: xxlor vs0, v0, v0 +; CHECK-NEXT: xxlor vs1, v1, v1 +; CHECK-NEXT: xxlor vs2, v4, v4 +; CHECK-NEXT: xxlor vs3, v5, v5 +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: xvi4ger8pp acc0, v2, v3 +; CHECK-NEXT: xvf16ger2pp acc0, v2, v1 +; CHECK-NEXT: pmxvf32gerpn acc0, v3, v5, 0, 0 +; CHECK-NEXT: pmxvf64gernp acc0, vsp4, v0, 0, 0 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r3) +; CHECK-NEXT: stxv vs1, 32(r3) +; CHECK-NEXT: stxv vs2, 16(r3) +; CHECK-NEXT: stxvx vs3, 0, r3 +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: intrinsics1: +; CHECK-BE: # %bb.0: +; CHECK-BE-NEXT: # kill: def $v5 killed $v5 killed $vsrp18 def $vsrp18 +; CHECK-BE-NEXT: vmr v1, v4 +; CHECK-BE-NEXT: vmr v4, v3 +; CHECK-BE-NEXT: ld r3, 112(r1) +; CHECK-BE-NEXT: vmr v0, v2 +; CHECK-BE-NEXT: xxlor vs5, v2, v2 +; CHECK-BE-NEXT: xxlor vs4, v5, v5 +; CHECK-BE-NEXT: xxlor vs0, v0, v0 +; CHECK-BE-NEXT: xxlor vs1, v1, v1 +; CHECK-BE-NEXT: xxlor vs2, v4, v4 +; CHECK-BE-NEXT: xxlor vs3, v5, v5 +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: xvi4ger8pp acc0, v2, v3 +; CHECK-BE-NEXT: xvf16ger2pp acc0, v2, v1 +; CHECK-BE-NEXT: pmxvf32gerpn acc0, v3, v5, 0, 0 +; CHECK-BE-NEXT: pmxvf64gernp acc0, vsp4, v0, 0, 0 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r3) +; CHECK-BE-NEXT: stxvx vs0, 0, r3 +; CHECK-BE-NEXT: stxv vs3, 48(r3) +; CHECK-BE-NEXT: stxv vs2, 32(r3) +; CHECK-BE-NEXT: blr + %1 = tail call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %vc1, <16 x i8> %vc3, <16 x i8> %vc2, <16 x i8> %vc4) + %2 = tail call <512 x i1> @llvm.ppc.mma.xvi4ger8pp(<512 x i1> %1, <16 x i8> %vc1, <16 x i8> %vc2) + %3 = tail call <512 x i1> @llvm.ppc.mma.xvf16ger2pp(<512 x i1> %2, <16 x i8> %vc1, <16 x i8> %vc3) + %4 = tail call <512 x i1> @llvm.ppc.mma.pmxvf32gerpn(<512 x i1> %3, <16 x i8> %vc2, <16 x i8> %vc4, i32 0, i32 0) + %5 = tail call <256 x i1> @llvm.ppc.mma.assemble.pair(<16 x i8> %vc4, <16 x i8> %vc1) + %6 = tail call <512 x i1> @llvm.ppc.mma.pmxvf64gernp(<512 x i1> %4, <256 x i1> %5, <16 x i8> %vc1, i32 0, i32 0) + %7 = bitcast i8* %ptr to <512 x i1>* + store <512 x i1> %6, <512 x i1>* %7, align 64 + ret void +} + +declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1>) +define void @intrinsics2(<16 x i8>* %ptr1, <16 x i8>* %ptr2, <16 x i8>* %ptr3, <16 x i8>* %ptr4, i8* %ptr) { +; CHECK-LABEL: intrinsics2: +; CHECK: # %bb.0: +; CHECK-NEXT: lxv vs4, 0(r3) +; CHECK-NEXT: lxv vs5, 0(r4) +; CHECK-NEXT: lxv vs6, 0(r5) +; CHECK-NEXT: lxv vs7, 0(r6) +; CHECK-NEXT: xxlor vs0, vs4, vs4 +; CHECK-NEXT: xxlor vs9, vs4, vs4 +; CHECK-NEXT: xxlor vs1, vs5, vs5 +; CHECK-NEXT: xxlor vs2, vs6, 
vs6 +; CHECK-NEXT: xxlor vs3, vs7, vs7 +; CHECK-NEXT: xxlor vs8, vs7, vs7 +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: xvi8ger4pp acc0, vs4, vs5 +; CHECK-NEXT: xvf16ger2pn acc0, vs4, vs6 +; CHECK-NEXT: pmxvf32gernn acc0, vs5, vs7, 0, 0 +; CHECK-NEXT: pmxvf64gernn acc0, vsp8, vs4, 0, 0 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs3, 0(r3) +; CHECK-NEXT: stxv vs2, 0(r4) +; CHECK-NEXT: stxv vs1, 0(r5) +; CHECK-NEXT: stxv vs0, 0(r6) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: intrinsics2: +; CHECK-BE: # %bb.0: +; CHECK-BE-NEXT: lxv vs4, 0(r3) +; CHECK-BE-NEXT: lxv vs5, 0(r4) +; CHECK-BE-NEXT: lxv vs6, 0(r5) +; CHECK-BE-NEXT: lxv vs7, 0(r6) +; CHECK-BE-NEXT: xxlor vs0, vs4, vs4 +; CHECK-BE-NEXT: xxlor vs9, vs4, vs4 +; CHECK-BE-NEXT: xxlor vs1, vs5, vs5 +; CHECK-BE-NEXT: xxlor vs2, vs6, vs6 +; CHECK-BE-NEXT: xxlor vs3, vs7, vs7 +; CHECK-BE-NEXT: xxlor vs8, vs7, vs7 +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: xvi8ger4pp acc0, vs4, vs5 +; CHECK-BE-NEXT: xvf16ger2pn acc0, vs4, vs6 +; CHECK-BE-NEXT: pmxvf32gernn acc0, vs5, vs7, 0, 0 +; CHECK-BE-NEXT: pmxvf64gernn acc0, vsp8, vs4, 0, 0 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs0, 0(r3) +; CHECK-BE-NEXT: stxv vs1, 0(r4) +; CHECK-BE-NEXT: stxv vs2, 0(r5) +; CHECK-BE-NEXT: stxv vs3, 0(r6) +; CHECK-BE-NEXT: blr + %vc1 = load <16 x i8>, <16 x i8>* %ptr1, align 16 + %vc2 = load <16 x i8>, <16 x i8>* %ptr2, align 16 + %vc3 = load <16 x i8>, <16 x i8>* %ptr3, align 16 + %vc4 = load <16 x i8>, <16 x i8>* %ptr4, align 16 + %1 = tail call <512 x i1> @llvm.ppc.mma.assemble.acc(<16 x i8> %vc1, <16 x i8> %vc2, <16 x i8> %vc3, <16 x i8> %vc4) + %2 = tail call <512 x i1> @llvm.ppc.mma.xvi8ger4pp(<512 x i1> %1, <16 x i8> %vc1, <16 x i8> %vc2) + %3 = tail call <512 x i1> @llvm.ppc.mma.xvf16ger2pn(<512 x i1> %2, <16 x i8> %vc1, <16 x i8> %vc3) + %4 = tail call <512 x i1> @llvm.ppc.mma.pmxvf32gernn(<512 x i1> %3, <16 x i8> %vc2, <16 x i8> %vc4, i32 0, i32 0) + %5 = tail call <256 x i1> @llvm.ppc.mma.assemble.pair(<16 x i8> %vc4, <16 x i8> %vc1) + %6 = tail call <512 x i1> @llvm.ppc.mma.pmxvf64gernn(<512 x i1> %4, <256 x i1> %5, <16 x i8> %vc1, i32 0, i32 0) + %7 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.ppc.mma.disassemble.acc(<512 x i1> %6) + %8 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %7, 0 + %9 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %7, 1 + %10 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %7, 2 + %11 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %7, 3 + %12 = bitcast i8* %ptr to <512 x i1>* + store <16 x i8> %8, <16 x i8>* %ptr1, align 16 + store <16 x i8> %9, <16 x i8>* %ptr2, align 16 + store <16 x i8> %10, <16 x i8>* %ptr3, align 16 + store <16 x i8> %11, <16 x i8>* %ptr4, align 16 + ret void +} + +define void @test1(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) { +; CHECK-LABEL: test1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvi4ger8 acc0, v2, v2 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test1: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: xvi4ger8 acc0, v2, v2 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = tail call <512 x i1> @llvm.ppc.mma.xvi4ger8(<16 x i8> %vc, <16 x i8> %vc) + %1 = bitcast i8* %resp to <512 x i1>* + store <512 
x i1> %0, <512 x i1>* %1, align 64 + ret void +} + + +declare <512 x i1> @llvm.ppc.mma.xvi4ger8(<16 x i8>, <16 x i8>) + +define void @test2(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) { +; CHECK-LABEL: test2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lxv vs1, 32(r3) +; CHECK-NEXT: lxv vs0, 48(r3) +; CHECK-NEXT: lxv vs3, 0(r3) +; CHECK-NEXT: lxv vs2, 16(r3) +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: xvi4ger8pp acc0, v2, v2 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test2: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: lxv vs1, 16(r3) +; CHECK-BE-NEXT: lxv vs0, 0(r3) +; CHECK-BE-NEXT: lxv vs3, 48(r3) +; CHECK-BE-NEXT: lxv vs2, 32(r3) +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: xvi4ger8pp acc0, v2, v2 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = bitcast i8* %vqp to <512 x i1>* + %1 = load <512 x i1>, <512 x i1>* %0, align 64 + %2 = tail call <512 x i1> @llvm.ppc.mma.xvi4ger8pp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc) + %3 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %2, <512 x i1>* %3, align 64 + ret void +} + + +declare <512 x i1> @llvm.ppc.mma.xvi4ger8pp(<512 x i1>, <16 x i8>, <16 x i8>) + +define void @test3(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) { +; CHECK-LABEL: test3: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pmxvi4ger8 acc0, v2, v2, 0, 0, 0 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test3: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: pmxvi4ger8 acc0, v2, v2, 0, 0, 0 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = tail call <512 x i1> @llvm.ppc.mma.pmxvi4ger8(<16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0) + %1 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %0, <512 x i1>* %1, align 64 + ret void +} + + +declare <512 x i1> @llvm.ppc.mma.pmxvi4ger8(<16 x i8>, <16 x i8>, i32, i32, i32) + +define void @test4(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) { +; CHECK-LABEL: test4: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lxv vs1, 32(r3) +; CHECK-NEXT: lxv vs0, 48(r3) +; CHECK-NEXT: lxv vs3, 0(r3) +; CHECK-NEXT: lxv vs2, 16(r3) +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: pmxvi4ger8pp acc0, v2, v2, 0, 0, 0 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test4: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: lxv vs1, 16(r3) +; CHECK-BE-NEXT: lxv vs0, 0(r3) +; CHECK-BE-NEXT: lxv vs3, 48(r3) +; CHECK-BE-NEXT: lxv vs2, 32(r3) +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: pmxvi4ger8pp acc0, v2, v2, 0, 0, 0 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = bitcast i8* %vqp to <512 x i1>* + %1 = load <512 x i1>, <512 x i1>* %0, align 64 + %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvi4ger8pp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 
0, i32 0, i32 0) + %3 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %2, <512 x i1>* %3, align 64 + ret void +} + + +declare <512 x i1> @llvm.ppc.mma.pmxvi4ger8pp(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32, i32) + +define void @test5(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) { +; CHECK-LABEL: test5: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvi8ger4 acc0, v2, v2 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test5: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: xvi8ger4 acc0, v2, v2 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = tail call <512 x i1> @llvm.ppc.mma.xvi8ger4(<16 x i8> %vc, <16 x i8> %vc) + %1 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %0, <512 x i1>* %1, align 64 + ret void +} + + +declare <512 x i1> @llvm.ppc.mma.xvi8ger4(<16 x i8>, <16 x i8>) + +define void @test6(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) { +; CHECK-LABEL: test6: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lxv vs1, 32(r3) +; CHECK-NEXT: lxv vs0, 48(r3) +; CHECK-NEXT: lxv vs3, 0(r3) +; CHECK-NEXT: lxv vs2, 16(r3) +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: xvi8ger4pp acc0, v2, v2 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test6: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: lxv vs1, 16(r3) +; CHECK-BE-NEXT: lxv vs0, 0(r3) +; CHECK-BE-NEXT: lxv vs3, 48(r3) +; CHECK-BE-NEXT: lxv vs2, 32(r3) +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: xvi8ger4pp acc0, v2, v2 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = bitcast i8* %vqp to <512 x i1>* + %1 = load <512 x i1>, <512 x i1>* %0, align 64 + %2 = tail call <512 x i1> @llvm.ppc.mma.xvi8ger4pp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc) + %3 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %2, <512 x i1>* %3, align 64 + ret void +} + + +declare <512 x i1> @llvm.ppc.mma.xvi8ger4pp(<512 x i1>, <16 x i8>, <16 x i8>) + +define void @test7(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) { +; CHECK-LABEL: test7: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pmxvi8ger4 acc0, v2, v2, 0, 0, 0 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test7: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: pmxvi8ger4 acc0, v2, v2, 0, 0, 0 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = tail call <512 x i1> @llvm.ppc.mma.pmxvi8ger4(<16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0) + %1 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %0, <512 x i1>* %1, align 64 + ret void +} + + +declare <512 x i1> @llvm.ppc.mma.pmxvi8ger4(<16 x i8>, <16 x i8>, i32, i32, i32) + +define void @test8(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) { +; CHECK-LABEL: test8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lxv vs1, 32(r3) +; CHECK-NEXT: lxv vs0, 
48(r3) +; CHECK-NEXT: lxv vs3, 0(r3) +; CHECK-NEXT: lxv vs2, 16(r3) +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: pmxvi8ger4pp acc0, v2, v2, 0, 0, 0 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test8: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: lxv vs1, 16(r3) +; CHECK-BE-NEXT: lxv vs0, 0(r3) +; CHECK-BE-NEXT: lxv vs3, 48(r3) +; CHECK-BE-NEXT: lxv vs2, 32(r3) +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: pmxvi8ger4pp acc0, v2, v2, 0, 0, 0 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = bitcast i8* %vqp to <512 x i1>* + %1 = load <512 x i1>, <512 x i1>* %0, align 64 + %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvi8ger4pp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0) + %3 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %2, <512 x i1>* %3, align 64 + ret void +} + + +declare <512 x i1> @llvm.ppc.mma.pmxvi8ger4pp(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32, i32) + +define void @test9(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) { +; CHECK-LABEL: test9: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvi16ger2s acc0, v2, v2 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test9: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: xvi16ger2s acc0, v2, v2 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = tail call <512 x i1> @llvm.ppc.mma.xvi16ger2s(<16 x i8> %vc, <16 x i8> %vc) + %1 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %0, <512 x i1>* %1, align 64 + ret void +} + + +declare <512 x i1> @llvm.ppc.mma.xvi16ger2s(<16 x i8>, <16 x i8>) + +define void @test10(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) { +; CHECK-LABEL: test10: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lxv vs1, 32(r3) +; CHECK-NEXT: lxv vs0, 48(r3) +; CHECK-NEXT: lxv vs3, 0(r3) +; CHECK-NEXT: lxv vs2, 16(r3) +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: xvi16ger2spp acc0, v2, v2 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test10: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: lxv vs1, 16(r3) +; CHECK-BE-NEXT: lxv vs0, 0(r3) +; CHECK-BE-NEXT: lxv vs3, 48(r3) +; CHECK-BE-NEXT: lxv vs2, 32(r3) +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: xvi16ger2spp acc0, v2, v2 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = bitcast i8* %vqp to <512 x i1>* + %1 = load <512 x i1>, <512 x i1>* %0, align 64 + %2 = tail call <512 x i1> @llvm.ppc.mma.xvi16ger2spp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc) + %3 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %2, <512 x i1>* %3, align 64 + ret void +} + + +declare <512 x i1> @llvm.ppc.mma.xvi16ger2spp(<512 x i1>, <16 x i8>, <16 x i8>) + +define void @test11(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) { +; CHECK-LABEL: test11: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: pmxvi16ger2s acc0, v2, v2, 0, 0, 0 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test11: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: pmxvi16ger2s acc0, v2, v2, 0, 0, 0 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = tail call <512 x i1> @llvm.ppc.mma.pmxvi16ger2s(<16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0) + %1 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %0, <512 x i1>* %1, align 64 + ret void +} + + +declare <512 x i1> @llvm.ppc.mma.pmxvi16ger2s(<16 x i8>, <16 x i8>, i32, i32, i32) + +define void @test12(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) { +; CHECK-LABEL: test12: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lxv vs1, 32(r3) +; CHECK-NEXT: lxv vs0, 48(r3) +; CHECK-NEXT: lxv vs3, 0(r3) +; CHECK-NEXT: lxv vs2, 16(r3) +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: pmxvi16ger2spp acc0, v2, v2, 0, 0, 0 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test12: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: lxv vs1, 16(r3) +; CHECK-BE-NEXT: lxv vs0, 0(r3) +; CHECK-BE-NEXT: lxv vs3, 48(r3) +; CHECK-BE-NEXT: lxv vs2, 32(r3) +; CHECK-BE-NEXT: xxmtacc acc0 +; CHECK-BE-NEXT: pmxvi16ger2spp acc0, v2, v2, 0, 0, 0 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = bitcast i8* %vqp to <512 x i1>* + %1 = load <512 x i1>, <512 x i1>* %0, align 64 + %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvi16ger2spp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0) + %3 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %2, <512 x i1>* %3, align 64 + ret void +} + + +declare <512 x i1> @llvm.ppc.mma.pmxvi16ger2spp(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32, i32) + +define void @test13(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) { +; CHECK-LABEL: test13: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xvf16ger2 acc0, v2, v2 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv vs2, 16(r7) +; CHECK-NEXT: stxv vs3, 0(r7) +; CHECK-NEXT: blr +; +; CHECK-BE-LABEL: test13: +; CHECK-BE: # %bb.0: # %entry +; CHECK-BE-NEXT: xvf16ger2 acc0, v2, v2 +; CHECK-BE-NEXT: xxmfacc acc0 +; CHECK-BE-NEXT: stxv vs1, 16(r7) +; CHECK-BE-NEXT: stxv vs0, 0(r7) +; CHECK-BE-NEXT: stxv vs3, 48(r7) +; CHECK-BE-NEXT: stxv vs2, 32(r7) +; CHECK-BE-NEXT: blr +entry: + %0 = tail call <512 x i1> @llvm.ppc.mma.xvf16ger2(<16 x i8> %vc, <16 x i8> %vc) + %1 = bitcast i8* %resp to <512 x i1>* + store <512 x i1> %0, <512 x i1>* %1, align 64 + ret void +} + + +declare <512 x i1> @llvm.ppc.mma.xvf16ger2(<16 x i8>, <16 x i8>) + +define void @test14(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) { +; CHECK-LABEL: test14: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lxv vs1, 32(r3) +; CHECK-NEXT: lxv vs0, 48(r3) +; CHECK-NEXT: lxv vs3, 0(r3) +; CHECK-NEXT: lxv vs2, 16(r3) +; CHECK-NEXT: xxmtacc acc0 +; CHECK-NEXT: xvf16ger2pp acc0, v2, v2 +; CHECK-NEXT: xxmfacc acc0 +; CHECK-NEXT: stxv vs0, 48(r7) +; CHECK-NEXT: stxv vs1, 32(r7) +; CHECK-NEXT: stxv 
+define void @test13(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test13:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvf16ger2 acc0, v2, v2
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test13:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: xvf16ger2 acc0, v2, v2
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = tail call <512 x i1> @llvm.ppc.mma.xvf16ger2(<16 x i8> %vc, <16 x i8> %vc)
+ %1 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %0, <512 x i1>* %1, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.xvf16ger2(<16 x i8>, <16 x i8>)
+
+define void @test14(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test14:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: xvf16ger2pp acc0, v2, v2
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test14:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: xvf16ger2pp acc0, v2, v2
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = tail call <512 x i1> @llvm.ppc.mma.xvf16ger2pp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc)
+ %3 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %2, <512 x i1>* %3, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.xvf16ger2pp(<512 x i1>, <16 x i8>, <16 x i8>)
+
+define void @test15(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test15:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: xvf16ger2pn acc0, v2, v2
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test15:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: xvf16ger2pn acc0, v2, v2
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = tail call <512 x i1> @llvm.ppc.mma.xvf16ger2pn(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc)
+ %3 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %2, <512 x i1>* %3, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.xvf16ger2pn(<512 x i1>, <16 x i8>, <16 x i8>)
+
+define void @test16(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: xvf16ger2np acc0, v2, v2
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test16:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: xvf16ger2np acc0, v2, v2
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = tail call <512 x i1> @llvm.ppc.mma.xvf16ger2np(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc)
+ %3 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %2, <512 x i1>* %3, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.xvf16ger2np(<512 x i1>, <16 x i8>, <16 x i8>)
+
+define void @test17(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test17:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: xvf16ger2nn acc0, v2, v2
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test17:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: xvf16ger2nn acc0, v2, v2
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = tail call <512 x i1> @llvm.ppc.mma.xvf16ger2nn(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc)
+ %3 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %2, <512 x i1>* %3, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.xvf16ger2nn(<512 x i1>, <16 x i8>, <16 x i8>)
+
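+; The pmxv* forms below are the masked variants: they take additional
+; i32 immediate mask operands (all zero in these tests) that select
+; which rows, columns, and partial products contribute to the result.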
+define void @test18(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test18:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pmxvf16ger2 acc0, v2, v2, 0, 0, 0
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test18:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: pmxvf16ger2 acc0, v2, v2, 0, 0, 0
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = tail call <512 x i1> @llvm.ppc.mma.pmxvf16ger2(<16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0)
+ %1 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %0, <512 x i1>* %1, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.pmxvf16ger2(<16 x i8>, <16 x i8>, i32, i32, i32)
+
+define void @test19(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test19:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: pmxvf16ger2pp acc0, v2, v2, 0, 0, 0
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test19:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: pmxvf16ger2pp acc0, v2, v2, 0, 0, 0
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvf16ger2pp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0)
+ %3 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %2, <512 x i1>* %3, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.pmxvf16ger2pp(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32, i32)
+
+define void @test20(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test20:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: pmxvf16ger2pn acc0, v2, v2, 0, 0, 0
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test20:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: pmxvf16ger2pn acc0, v2, v2, 0, 0, 0
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvf16ger2pn(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0)
+ %3 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %2, <512 x i1>* %3, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.pmxvf16ger2pn(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32, i32)
+
+define void @test21(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test21:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: pmxvf16ger2np acc0, v2, v2, 0, 0, 0
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test21:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: pmxvf16ger2np acc0, v2, v2, 0, 0, 0
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvf16ger2np(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0)
+ %3 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %2, <512 x i1>* %3, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.pmxvf16ger2np(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32, i32)
+
+define void @test22(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test22:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: pmxvf16ger2nn acc0, v2, v2, 0, 0, 0
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test22:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: pmxvf16ger2nn acc0, v2, v2, 0, 0, 0
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvf16ger2nn(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0, i32 0)
+ %3 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %2, <512 x i1>* %3, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.pmxvf16ger2nn(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32, i32)
+
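+; Single-precision tests: xvf32ger and its pp/pn/np/nn accumulating
+; variants take two <16 x i8> multiplicands and no mask operands.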
+define void @test23(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test23:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvf32ger acc0, v2, v2
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test23:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: xvf32ger acc0, v2, v2
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = tail call <512 x i1> @llvm.ppc.mma.xvf32ger(<16 x i8> %vc, <16 x i8> %vc)
+ %1 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %0, <512 x i1>* %1, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.xvf32ger(<16 x i8>, <16 x i8>)
+
+define void @test24(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test24:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: xvf32gerpp acc0, v2, v2
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test24:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: xvf32gerpp acc0, v2, v2
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = tail call <512 x i1> @llvm.ppc.mma.xvf32gerpp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc)
+ %3 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %2, <512 x i1>* %3, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.xvf32gerpp(<512 x i1>, <16 x i8>, <16 x i8>)
+
+define void @test25(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test25:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: xvf32gerpn acc0, v2, v2
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test25:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: xvf32gerpn acc0, v2, v2
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = tail call <512 x i1> @llvm.ppc.mma.xvf32gerpn(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc)
+ %3 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %2, <512 x i1>* %3, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.xvf32gerpn(<512 x i1>, <16 x i8>, <16 x i8>)
+
+define void @test26(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test26:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: xvf32gernp acc0, v2, v2
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test26:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: xvf32gernp acc0, v2, v2
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = tail call <512 x i1> @llvm.ppc.mma.xvf32gernp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc)
+ %3 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %2, <512 x i1>* %3, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.xvf32gernp(<512 x i1>, <16 x i8>, <16 x i8>)
+
+define void @test27(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test27:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: xvf32gernn acc0, v2, v2
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test27:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: xvf32gernn acc0, v2, v2
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = tail call <512 x i1> @llvm.ppc.mma.xvf32gernn(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc)
+ %3 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %2, <512 x i1>* %3, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.xvf32gernn(<512 x i1>, <16 x i8>, <16 x i8>)
+
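+; The pmxvf32ger* variants below carry only two mask immediates (no
+; product mask), matching the trailing i32, i32 operands in their
+; declarations.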
+define void @test28(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test28:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pmxvf32ger acc0, v2, v2, 0, 0
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test28:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: pmxvf32ger acc0, v2, v2, 0, 0
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = tail call <512 x i1> @llvm.ppc.mma.pmxvf32ger(<16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0)
+ %1 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %0, <512 x i1>* %1, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.pmxvf32ger(<16 x i8>, <16 x i8>, i32, i32)
+
+define void @test29(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test29:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: pmxvf32gerpp acc0, v2, v2, 0, 0
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test29:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: pmxvf32gerpp acc0, v2, v2, 0, 0
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvf32gerpp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0)
+ %3 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %2, <512 x i1>* %3, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.pmxvf32gerpp(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32)
+
+define void @test30(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test30:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: pmxvf32gerpn acc0, v2, v2, 0, 0
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test30:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: pmxvf32gerpn acc0, v2, v2, 0, 0
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvf32gerpn(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0)
+ %3 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %2, <512 x i1>* %3, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.pmxvf32gerpn(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32)
+
+define void @test31(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test31:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: pmxvf32gernp acc0, v2, v2, 0, 0
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test31:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: pmxvf32gernp acc0, v2, v2, 0, 0
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvf32gernp(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0)
+ %3 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %2, <512 x i1>* %3, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.pmxvf32gernp(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32)
+
+define void @test32(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: pmxvf32gernn acc0, v2, v2, 0, 0
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test32:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: pmxvf32gernn acc0, v2, v2, 0, 0
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvf32gernn(<512 x i1> %1, <16 x i8> %vc, <16 x i8> %vc, i32 0, i32 0)
+ %3 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %2, <512 x i1>* %3, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.pmxvf32gernn(<512 x i1>, <16 x i8>, <16 x i8>, i32, i32)
+
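+; Double-precision tests: the xvf64ger* intrinsics take a <256 x i1>
+; operand, a VSR pair loaded from %vpp (vsp0/vsp4 in the checks below),
+; as the first multiplicand instead of a single vector.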
+define void @test33(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test33:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 0(r4)
+; CHECK-NEXT: lxv vs0, 16(r4)
+; CHECK-NEXT: xvf64ger acc1, vsp0, v2
+; CHECK-NEXT: xxmfacc acc1
+; CHECK-NEXT: stxv vs4, 48(r7)
+; CHECK-NEXT: stxv vs5, 32(r7)
+; CHECK-NEXT: stxv vs6, 16(r7)
+; CHECK-NEXT: stxv vs7, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test33:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r4)
+; CHECK-BE-NEXT: lxv vs0, 0(r4)
+; CHECK-BE-NEXT: xvf64ger acc1, vsp0, v2
+; CHECK-BE-NEXT: xxmfacc acc1
+; CHECK-BE-NEXT: stxv vs5, 16(r7)
+; CHECK-BE-NEXT: stxv vs4, 0(r7)
+; CHECK-BE-NEXT: stxv vs7, 48(r7)
+; CHECK-BE-NEXT: stxv vs6, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vpp to <256 x i1>*
+ %1 = load <256 x i1>, <256 x i1>* %0, align 32
+ %2 = tail call <512 x i1> @llvm.ppc.mma.xvf64ger(<256 x i1> %1, <16 x i8> %vc)
+ %3 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %2, <512 x i1>* %3, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.xvf64ger(<256 x i1>, <16 x i8>)
+
+define void @test34(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test34:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: lxv vs5, 0(r4)
+; CHECK-NEXT: lxv vs4, 16(r4)
+; CHECK-NEXT: xvf64gerpp acc0, vsp4, v2
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test34:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: lxv vs5, 16(r4)
+; CHECK-BE-NEXT: lxv vs4, 0(r4)
+; CHECK-BE-NEXT: xvf64gerpp acc0, vsp4, v2
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = bitcast i8* %vpp to <256 x i1>*
+ %3 = load <256 x i1>, <256 x i1>* %2, align 32
+ %4 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1> %1, <256 x i1> %3, <16 x i8> %vc)
+ %5 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %4, <512 x i1>* %5, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.xvf64gerpp(<512 x i1>, <256 x i1>, <16 x i8>)
+
+define void @test35(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test35:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: lxv vs5, 0(r4)
+; CHECK-NEXT: lxv vs4, 16(r4)
+; CHECK-NEXT: xvf64gerpn acc0, vsp4, v2
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test35:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: lxv vs5, 16(r4)
+; CHECK-BE-NEXT: lxv vs4, 0(r4)
+; CHECK-BE-NEXT: xvf64gerpn acc0, vsp4, v2
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = bitcast i8* %vpp to <256 x i1>*
+ %3 = load <256 x i1>, <256 x i1>* %2, align 32
+ %4 = tail call <512 x i1> @llvm.ppc.mma.xvf64gerpn(<512 x i1> %1, <256 x i1> %3, <16 x i8> %vc)
+ %5 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %4, <512 x i1>* %5, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.xvf64gerpn(<512 x i1>, <256 x i1>, <16 x i8>)
+
+define void @test36(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test36:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: lxv vs5, 0(r4)
+; CHECK-NEXT: lxv vs4, 16(r4)
+; CHECK-NEXT: xvf64gernp acc0, vsp4, v2
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test36:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: lxv vs5, 16(r4)
+; CHECK-BE-NEXT: lxv vs4, 0(r4)
+; CHECK-BE-NEXT: xvf64gernp acc0, vsp4, v2
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = bitcast i8* %vpp to <256 x i1>*
+ %3 = load <256 x i1>, <256 x i1>* %2, align 32
+ %4 = tail call <512 x i1> @llvm.ppc.mma.xvf64gernp(<512 x i1> %1, <256 x i1> %3, <16 x i8> %vc)
+ %5 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %4, <512 x i1>* %5, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.xvf64gernp(<512 x i1>, <256 x i1>, <16 x i8>)
+
+define void @test37(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test37:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: lxv vs5, 0(r4)
+; CHECK-NEXT: lxv vs4, 16(r4)
+; CHECK-NEXT: xvf64gernn acc0, vsp4, v2
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test37:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: lxv vs5, 16(r4)
+; CHECK-BE-NEXT: lxv vs4, 0(r4)
+; CHECK-BE-NEXT: xvf64gernn acc0, vsp4, v2
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = bitcast i8* %vpp to <256 x i1>*
+ %3 = load <256 x i1>, <256 x i1>* %2, align 32
+ %4 = tail call <512 x i1> @llvm.ppc.mma.xvf64gernn(<512 x i1> %1, <256 x i1> %3, <16 x i8> %vc)
+ %5 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %4, <512 x i1>* %5, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.xvf64gernn(<512 x i1>, <256 x i1>, <16 x i8>)
+
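+; Masked double-precision variants: pmxvf64ger* combine the VSR-pair
+; operand with the two mask immediates.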
+define void @test38(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test38:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 0(r4)
+; CHECK-NEXT: lxv vs0, 16(r4)
+; CHECK-NEXT: pmxvf64ger acc1, vsp0, v2, 0, 0
+; CHECK-NEXT: xxmfacc acc1
+; CHECK-NEXT: stxv vs4, 48(r7)
+; CHECK-NEXT: stxv vs5, 32(r7)
+; CHECK-NEXT: stxv vs6, 16(r7)
+; CHECK-NEXT: stxv vs7, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test38:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r4)
+; CHECK-BE-NEXT: lxv vs0, 0(r4)
+; CHECK-BE-NEXT: pmxvf64ger acc1, vsp0, v2, 0, 0
+; CHECK-BE-NEXT: xxmfacc acc1
+; CHECK-BE-NEXT: stxv vs5, 16(r7)
+; CHECK-BE-NEXT: stxv vs4, 0(r7)
+; CHECK-BE-NEXT: stxv vs7, 48(r7)
+; CHECK-BE-NEXT: stxv vs6, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vpp to <256 x i1>*
+ %1 = load <256 x i1>, <256 x i1>* %0, align 32
+ %2 = tail call <512 x i1> @llvm.ppc.mma.pmxvf64ger(<256 x i1> %1, <16 x i8> %vc, i32 0, i32 0)
+ %3 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %2, <512 x i1>* %3, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.pmxvf64ger(<256 x i1>, <16 x i8>, i32, i32)
+
+define void @test39(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test39:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: lxv vs5, 0(r4)
+; CHECK-NEXT: lxv vs4, 16(r4)
+; CHECK-NEXT: pmxvf64gerpp acc0, vsp4, v2, 0, 0
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test39:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: lxv vs5, 16(r4)
+; CHECK-BE-NEXT: lxv vs4, 0(r4)
+; CHECK-BE-NEXT: pmxvf64gerpp acc0, vsp4, v2, 0, 0
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = bitcast i8* %vpp to <256 x i1>*
+ %3 = load <256 x i1>, <256 x i1>* %2, align 32
+ %4 = tail call <512 x i1> @llvm.ppc.mma.pmxvf64gerpp(<512 x i1> %1, <256 x i1> %3, <16 x i8> %vc, i32 0, i32 0)
+ %5 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %4, <512 x i1>* %5, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.pmxvf64gerpp(<512 x i1>, <256 x i1>, <16 x i8>, i32, i32)
+
+define void @test40(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test40:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: lxv vs5, 0(r4)
+; CHECK-NEXT: lxv vs4, 16(r4)
+; CHECK-NEXT: pmxvf64gerpn acc0, vsp4, v2, 0, 0
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test40:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: lxv vs5, 16(r4)
+; CHECK-BE-NEXT: lxv vs4, 0(r4)
+; CHECK-BE-NEXT: pmxvf64gerpn acc0, vsp4, v2, 0, 0
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = bitcast i8* %vpp to <256 x i1>*
+ %3 = load <256 x i1>, <256 x i1>* %2, align 32
+ %4 = tail call <512 x i1> @llvm.ppc.mma.pmxvf64gerpn(<512 x i1> %1, <256 x i1> %3, <16 x i8> %vc, i32 0, i32 0)
+ %5 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %4, <512 x i1>* %5, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.pmxvf64gerpn(<512 x i1>, <256 x i1>, <16 x i8>, i32, i32)
+
+define void @test41(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test41:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: lxv vs5, 0(r4)
+; CHECK-NEXT: lxv vs4, 16(r4)
+; CHECK-NEXT: pmxvf64gernp acc0, vsp4, v2, 0, 0
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test41:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: lxv vs5, 16(r4)
+; CHECK-BE-NEXT: lxv vs4, 0(r4)
+; CHECK-BE-NEXT: pmxvf64gernp acc0, vsp4, v2, 0, 0
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = bitcast i8* %vpp to <256 x i1>*
+ %3 = load <256 x i1>, <256 x i1>* %2, align 32
+ %4 = tail call <512 x i1> @llvm.ppc.mma.pmxvf64gernp(<512 x i1> %1, <256 x i1> %3, <16 x i8> %vc, i32 0, i32 0)
+ %5 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %4, <512 x i1>* %5, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.pmxvf64gernp(<512 x i1>, <256 x i1>, <16 x i8>, i32, i32)
+
+define void @test42(i8* %vqp, i8* %vpp, <16 x i8> %vc, i8* %resp) {
+; CHECK-LABEL: test42:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs1, 32(r3)
+; CHECK-NEXT: lxv vs0, 48(r3)
+; CHECK-NEXT: lxv vs3, 0(r3)
+; CHECK-NEXT: lxv vs2, 16(r3)
+; CHECK-NEXT: xxmtacc acc0
+; CHECK-NEXT: lxv vs5, 0(r4)
+; CHECK-NEXT: lxv vs4, 16(r4)
+; CHECK-NEXT: pmxvf64gernn acc0, vsp4, v2, 0, 0
+; CHECK-NEXT: xxmfacc acc0
+; CHECK-NEXT: stxv vs0, 48(r7)
+; CHECK-NEXT: stxv vs1, 32(r7)
+; CHECK-NEXT: stxv vs2, 16(r7)
+; CHECK-NEXT: stxv vs3, 0(r7)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test42:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs1, 16(r3)
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: lxv vs3, 48(r3)
+; CHECK-BE-NEXT: lxv vs2, 32(r3)
+; CHECK-BE-NEXT: xxmtacc acc0
+; CHECK-BE-NEXT: lxv vs5, 16(r4)
+; CHECK-BE-NEXT: lxv vs4, 0(r4)
+; CHECK-BE-NEXT: pmxvf64gernn acc0, vsp4, v2, 0, 0
+; CHECK-BE-NEXT: xxmfacc acc0
+; CHECK-BE-NEXT: stxv vs1, 16(r7)
+; CHECK-BE-NEXT: stxv vs0, 0(r7)
+; CHECK-BE-NEXT: stxv vs3, 48(r7)
+; CHECK-BE-NEXT: stxv vs2, 32(r7)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = bitcast i8* %vqp to <512 x i1>*
+ %1 = load <512 x i1>, <512 x i1>* %0, align 64
+ %2 = bitcast i8* %vpp to <256 x i1>*
+ %3 = load <256 x i1>, <256 x i1>* %2, align 32
+ %4 = tail call <512 x i1> @llvm.ppc.mma.pmxvf64gernn(<512 x i1> %1, <256 x i1> %3, <16 x i8> %vc, i32 0, i32 0)
+ %5 = bitcast i8* %resp to <512 x i1>*
+ store <512 x i1> %4, <512 x i1>* %5, align 64
+ ret void
+}
+
+
+declare <512 x i1> @llvm.ppc.mma.pmxvf64gernn(<512 x i1>, <256 x i1>, <16 x i8>, i32, i32)