Index: clang/include/clang/Basic/BuiltinsPPC.def
===================================================================
--- clang/include/clang/Basic/BuiltinsPPC.def
+++ clang/include/clang/Basic/BuiltinsPPC.def
@@ -451,6 +451,14 @@
 BUILTIN(__builtin_vsx_xxeval, "V2ULLiV2ULLiV2ULLiV2ULLiIi", "")
 
+// P10 Vector Permute Extended built-in.
+BUILTIN(__builtin_vsx_xxpermx, "V16UcV16UcV16UcV16UcIi", "")
+// P10 Vector Blend built-ins.
+BUILTIN(__builtin_vsx_xxblendvb, "V16UcV16UcV16UcV16Uc", "")
+BUILTIN(__builtin_vsx_xxblendvh, "V8UsV8UsV8UsV8Us", "")
+BUILTIN(__builtin_vsx_xxblendvw, "V4UiV4UiV4UiV4Ui", "")
+BUILTIN(__builtin_vsx_xxblendvd, "V2ULLiV2ULLiV2ULLiV2ULLi", "")
+
 // Float 128 built-ins
 BUILTIN(__builtin_sqrtf128_round_to_odd, "LLdLLd", "")
 BUILTIN(__builtin_addf128_round_to_odd, "LLdLLdLLd", "")
Index: clang/lib/Headers/altivec.h
===================================================================
--- clang/lib/Headers/altivec.h
+++ clang/lib/Headers/altivec.h
@@ -16889,6 +16889,70 @@
 #define vec_srdb(__a, __b, __c) __builtin_altivec_vsrdbi(__a, __b, (__c & 0x7))
 
+#ifdef __VSX__
+
+/* vec_permx */
+
+#define vec_permx(__a, __b, __c, __d)                                          \
+  __builtin_vsx_xxpermx((__a), (__b), (__c), (__d))
+
+/* vec_blendv */
+
+static __inline__ vector signed char __ATTRS_o_ai vec_blendv(
+    vector signed char __a, vector signed char __b, vector unsigned char __c) {
+  return __builtin_vsx_xxblendvb(__a, __b, __c);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_blendv(vector unsigned char __a, vector unsigned char __b,
+           vector unsigned char __c) {
+  return __builtin_vsx_xxblendvb(__a, __b, __c);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_blendv(vector signed short __a, vector signed short __b,
+           vector unsigned short __c) {
+  return __builtin_vsx_xxblendvh(__a, __b, __c);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_blendv(vector unsigned short __a, vector unsigned short __b,
+           vector unsigned short __c) {
+  return __builtin_vsx_xxblendvh(__a, __b, __c);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai vec_blendv(
+    vector signed int __a, vector signed int __b, vector unsigned int __c) {
+  return __builtin_vsx_xxblendvw(__a, __b, __c);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai vec_blendv(
+    vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
+  return __builtin_vsx_xxblendvw(__a, __b, __c);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_blendv(vector signed long long __a, vector signed long long __b,
+           vector unsigned long long __c) {
+  return __builtin_vsx_xxblendvd(__a, __b, __c);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_blendv(vector unsigned long long __a, vector unsigned long long __b,
+           vector unsigned long long __c) {
+  return __builtin_vsx_xxblendvd(__a, __b, __c);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_blendv(vector float __a, vector float __b, vector unsigned int __c) {
+  return __builtin_vsx_xxblendvw(__a, __b, __c);
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_blendv(
+    vector double __a, vector double __b, vector unsigned long long __c) {
+  return __builtin_vsx_xxblendvd(__a, __b, __c);
+}
+#endif /* __VSX__ */
 #endif /* __POWER10_VECTOR__ */
 
 #undef __ATTRS_o_ai
 
Index: clang/lib/Sema/SemaChecking.cpp
===================================================================
--- clang/lib/Sema/SemaChecking.cpp
+++ clang/lib/Sema/SemaChecking.cpp
@@ -3132,6 +3132,8 @@
     return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
   case PPC::BI__builtin_altivec_vsrdbi:
     return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
+  case PPC::BI__builtin_vsx_xxpermx:
+    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
   }
   return SemaBuiltinConstantArgRange(TheCall, i, l, u);
 }
Index: clang/test/CodeGen/builtins-ppc-p10vector.c
===================================================================
--- clang/test/CodeGen/builtins-ppc-p10vector.c
+++ clang/test/CodeGen/builtins-ppc-p10vector.c
@@ -14,6 +14,8 @@
 vector signed long long vslla, vsllb;
 vector unsigned long long vulla, vullb, vullc;
 vector unsigned __int128 vui128a, vui128b, vui128c;
+vector float vfa, vfb;
+vector double vda, vdb;
 unsigned int uia;
 
 vector unsigned long long test_vpdepd(void) {
@@ -257,3 +259,133 @@
   // CHECK-NEXT: ret <2 x i64>
   return vec_srdb(vulla, vullb, 7);
 }
+
+vector signed char test_vec_permx_sc(void) {
+  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
+  // CHECK-NEXT: ret <16 x i8>
+  return vec_permx(vsca, vscb, vucc, 0);
+}
+
+vector unsigned char test_vec_permx_uc(void) {
+  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
+  // CHECK-NEXT: ret <16 x i8>
+  return vec_permx(vuca, vucb, vucc, 1);
+}
+
+vector signed short test_vec_permx_ss(void) {
+  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
+  // CHECK-NEXT: ret <8 x i16>
+  return vec_permx(vssa, vssb, vucc, 2);
+}
+
+vector unsigned short test_vec_permx_us(void) {
+  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
+  // CHECK-NEXT: ret <8 x i16>
+  return vec_permx(vusa, vusb, vucc, 3);
+}
+
+vector signed int test_vec_permx_si(void) {
+  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
+  // CHECK-NEXT: ret <4 x i32>
+  return vec_permx(vsia, vsib, vucc, 4);
+}
+
+vector unsigned int test_vec_permx_ui(void) {
+  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
+  // CHECK-NEXT: ret <4 x i32>
+  return vec_permx(vuia, vuib, vucc, 5);
+}
+
+vector signed long long test_vec_permx_sll(void) {
+  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
+  // CHECK-NEXT: ret <2 x i64>
+  return vec_permx(vslla, vsllb, vucc, 6);
+}
+
+vector unsigned long long test_vec_permx_ull(void) {
+  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
+  // CHECK-NEXT: ret <2 x i64>
+  return vec_permx(vulla, vullb, vucc, 7);
+}
+
+vector float test_vec_permx_f(void) {
+  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x float>
+  // CHECK-NEXT: ret <4 x float>
+  return vec_permx(vfa, vfb, vucc, 0);
+}
+
+vector double test_vec_permx_d(void) {
+  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
+  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x double>
+  // CHECK-NEXT: ret <2 x double>
+  return vec_permx(vda, vdb, vucc, 1);
+}
+
+vector signed char test_vec_blend_sc(void) {
+  // CHECK: @llvm.ppc.vsx.xxblendvb(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8>
+  // CHECK-NEXT: ret <16 x i8>
+  return vec_blendv(vsca, vscb, vucc);
+}
+
+vector unsigned char test_vec_blend_uc(void) {
+  // CHECK: @llvm.ppc.vsx.xxblendvb(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8>
+  // CHECK-NEXT: ret <16 x i8>
+  return vec_blendv(vuca, vucb, vucc);
+}
+
+vector signed short test_vec_blend_ss(void) {
+  // CHECK: @llvm.ppc.vsx.xxblendvh(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, <8 x i16>
+  // CHECK-NEXT: ret <8 x i16>
+  return vec_blendv(vssa, vssb, vusc);
+}
+
+vector unsigned short test_vec_blend_us(void) {
+  // CHECK: @llvm.ppc.vsx.xxblendvh(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, <8 x i16>
+  // CHECK-NEXT: ret <8 x i16>
+  return vec_blendv(vusa, vusb, vusc);
+}
+
+vector signed int test_vec_blend_si(void) {
+  // CHECK: @llvm.ppc.vsx.xxblendvw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, <4 x i32>
+  // CHECK-NEXT: ret <4 x i32>
+  return vec_blendv(vsia, vsib, vuic);
+}
+
+vector unsigned int test_vec_blend_ui(void) {
+  // CHECK: @llvm.ppc.vsx.xxblendvw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, <4 x i32>
+  // CHECK-NEXT: ret <4 x i32>
+  return vec_blendv(vuia, vuib, vuic);
+}
+
+vector signed long long test_vec_blend_sll(void) {
+  // CHECK: @llvm.ppc.vsx.xxblendvd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64>
+  // CHECK-NEXT: ret <2 x i64>
+  return vec_blendv(vslla, vsllb, vullc);
+}
+
+vector unsigned long long test_vec_blend_ull(void) {
+  // CHECK: @llvm.ppc.vsx.xxblendvd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64>
+  // CHECK-NEXT: ret <2 x i64>
+  return vec_blendv(vulla, vullb, vullc);
+}
+
+vector float test_vec_blend_f(void) {
+  // CHECK: @llvm.ppc.vsx.xxblendvw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, <4 x i32>
+  // CHECK-NEXT: bitcast <4 x i32> %{{.*}} to <4 x float>
+  // CHECK-NEXT: ret <4 x float>
+  return vec_blendv(vfa, vfb, vuic);
+}
+
+vector double test_vec_blend_d(void) {
+  // CHECK: @llvm.ppc.vsx.xxblendvd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64>
+  // CHECK-NEXT: bitcast <2 x i64> %{{.*}} to <2 x double>
+  // CHECK-NEXT: ret <2 x double>
+  return vec_blendv(vda, vdb, vullc);
+}
Index: llvm/include/llvm/IR/IntrinsicsPowerPC.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsPowerPC.td
+++ llvm/include/llvm/IR/IntrinsicsPowerPC.td
@@ -1017,6 +1017,27 @@
 def int_ppc_vsx_xxgenpcvdm :
       PowerPC_VSX_Intrinsic<"xxgenpcvdm", [llvm_v2i64_ty],
                             [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+// P10 VSX Vector permute extended.
+def int_ppc_vsx_xxpermx :
+      GCCBuiltin<"__builtin_vsx_xxpermx">,
+      Intrinsic<[llvm_v16i8_ty],
+                [llvm_v16i8_ty,llvm_v16i8_ty,llvm_v16i8_ty,llvm_i32_ty],
+                [IntrNoMem, ImmArg<ArgIndex<3>>]>;
+
+// P10 VSX Vector Blend Variable.
+def int_ppc_vsx_xxblendvb: GCCBuiltin<"__builtin_vsx_xxblendvb">,
+    Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+    llvm_v16i8_ty,llvm_v16i8_ty], [IntrNoMem]>;
+def int_ppc_vsx_xxblendvh: GCCBuiltin<"__builtin_vsx_xxblendvh">,
+    Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+    llvm_v8i16_ty,llvm_v8i16_ty], [IntrNoMem]>;
+def int_ppc_vsx_xxblendvw: GCCBuiltin<"__builtin_vsx_xxblendvw">,
+    Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+    llvm_v4i32_ty,llvm_v4i32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xxblendvd: GCCBuiltin<"__builtin_vsx_xxblendvd">,
+    Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+    llvm_v2i64_ty,llvm_v2i64_ty], [IntrNoMem]>;
 }
 
 //===----------------------------------------------------------------------===//
Index: llvm/lib/Target/PowerPC/PPCInstrPrefix.td
===================================================================
--- llvm/lib/Target/PowerPC/PPCInstrPrefix.td
+++ llvm/lib/Target/PowerPC/PPCInstrPrefix.td
@@ -821,6 +821,29 @@
 }
 
 //---------------------------- Anonymous Patterns ----------------------------//
+let Predicates = [PrefixInstrs] in {
+  def :
+    Pat<(v16i8 (int_ppc_vsx_xxpermx v16i8:$A, v16i8:$B, v16i8:$C, timm:$D)),
+        (COPY_TO_REGCLASS
+          (XXPERMX (COPY_TO_REGCLASS $A, VSRC),
+                   (COPY_TO_REGCLASS $B, VSRC),
+                   (COPY_TO_REGCLASS $C, VSRC), $D), VSRC)>;
+  def : Pat<(v16i8 (int_ppc_vsx_xxblendvb v16i8:$A, v16i8:$B, v16i8:$C)),
+            (COPY_TO_REGCLASS
+              (XXBLENDVB (COPY_TO_REGCLASS $A, VSRC),
+                         (COPY_TO_REGCLASS $B, VSRC),
+                         (COPY_TO_REGCLASS $C, VSRC)), VSRC)>;
+  def : Pat<(v8i16 (int_ppc_vsx_xxblendvh v8i16:$A, v8i16:$B, v8i16:$C)),
+            (COPY_TO_REGCLASS
+              (XXBLENDVH (COPY_TO_REGCLASS $A, VSRC),
+                         (COPY_TO_REGCLASS $B, VSRC),
+                         (COPY_TO_REGCLASS $C, VSRC)), VSRC)>;
+  def : Pat<(int_ppc_vsx_xxblendvw v4i32:$A, v4i32:$B, v4i32:$C),
+            (XXBLENDVW $A, $B, $C)>;
+  def : Pat<(int_ppc_vsx_xxblendvd v2i64:$A, v2i64:$B, v2i64:$C),
+            (XXBLENDVD $A, $B, $C)>;
+}
+
 let Predicates = [IsISA3_1] in {
   def : Pat<(v16i8 (int_ppc_vsx_xxgenpcvbm v16i8:$VRB, imm:$IMM)),
             (v16i8 (COPY_TO_REGCLASS (XXGENPCVBM $VRB, imm:$IMM), VRRC))>;
Index: llvm/test/CodeGen/PowerPC/builtins-ppc-p10permute.ll
===================================================================
--- llvm/test/CodeGen/PowerPC/builtins-ppc-p10permute.ll
+++ llvm/test/CodeGen/PowerPC/builtins-ppc-p10permute.ll
@@ -26,3 +26,58 @@
   ret <16 x i8> %0
 }
 declare <16 x i8> @llvm.ppc.altivec.vsrdbi(<16 x i8>, <16 x i8>, i32 immarg)
+
+define <16 x i8> @testXXPERMX(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK-LABEL: testXXPERMX:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xxpermx v2, v2, v3, v4, 1
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <16 x i8> @llvm.ppc.vsx.xxpermx(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, i32 1)
+  ret <16 x i8> %0
+}
+declare <16 x i8> @llvm.ppc.vsx.xxpermx(<16 x i8>, <16 x i8>, <16 x i8>, i32 immarg)
+
+define <16 x i8> @testXXBLENDVB(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) {
+; CHECK-LABEL: testXXBLENDVB:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xxblendvb v2, v2, v3, v4
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <16 x i8> @llvm.ppc.vsx.xxblendvb(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c)
+  ret <16 x i8> %0
+}
+declare <16 x i8> @llvm.ppc.vsx.xxblendvb(<16 x i8>, <16 x i8>, <16 x i8>)
+
+define <8 x i16> @testXXBLENDVH(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) {
+; CHECK-LABEL: testXXBLENDVH:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xxblendvh v2, v2, v3, v4
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <8 x i16> @llvm.ppc.vsx.xxblendvh(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c)
+  ret <8 x i16> %0
+}
+declare <8 x i16> @llvm.ppc.vsx.xxblendvh(<8 x i16>, <8 x i16>, <8 x i16>)
+
+define <4 x i32> @testXXBLENDVW(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+; CHECK-LABEL: testXXBLENDVW:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xxblendvw v2, v2, v3, v4
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <4 x i32> @llvm.ppc.vsx.xxblendvw(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c)
+  ret <4 x i32> %0
+}
+declare <4 x i32> @llvm.ppc.vsx.xxblendvw(<4 x i32>, <4 x i32>, <4 x i32>)
+
+define <2 x i64> @testXXBLENDVD(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) {
+; CHECK-LABEL: testXXBLENDVD:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    xxblendvd v2, v2, v3, v4
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i64> @llvm.ppc.vsx.xxblendvd(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c)
+  ret <2 x i64> %0
+}
+declare <2 x i64> @llvm.ppc.vsx.xxblendvd(<2 x i64>, <2 x i64>, <2 x i64>)