diff --git a/clang/include/clang/Basic/BuiltinsPPC.def b/clang/include/clang/Basic/BuiltinsPPC.def
--- a/clang/include/clang/Basic/BuiltinsPPC.def
+++ b/clang/include/clang/Basic/BuiltinsPPC.def
@@ -336,6 +336,16 @@
 BUILTIN(__builtin_altivec_vinswvlx, "V4UiV4UiUiV4Ui", "")
 BUILTIN(__builtin_altivec_vinswvrx, "V4UiV4UiUiV4Ui", "")
 
+// P10 Vector Extract built-ins.
+BUILTIN(__builtin_altivec_vextdubvlx, "V2ULLiV16UcV16UcUi", "")
+BUILTIN(__builtin_altivec_vextdubvrx, "V2ULLiV16UcV16UcUi", "")
+BUILTIN(__builtin_altivec_vextduhvlx, "V2ULLiV8UsV8UsUi", "")
+BUILTIN(__builtin_altivec_vextduhvrx, "V2ULLiV8UsV8UsUi", "")
+BUILTIN(__builtin_altivec_vextduwvlx, "V2ULLiV4UiV4UiUi", "")
+BUILTIN(__builtin_altivec_vextduwvrx, "V2ULLiV4UiV4UiUi", "")
+BUILTIN(__builtin_altivec_vextddvlx, "V2ULLiV2ULLiV2ULLiUi", "")
+BUILTIN(__builtin_altivec_vextddvrx, "V2ULLiV2ULLiV2ULLiUi", "")
+
 // VSX built-ins.
 
 BUILTIN(__builtin_vsx_lxvd2x, "V2divC*", "")
diff --git a/clang/lib/Headers/altivec.h b/clang/lib/Headers/altivec.h
--- a/clang/lib/Headers/altivec.h
+++ b/clang/lib/Headers/altivec.h
@@ -17101,6 +17101,92 @@
 #endif
 }
 
+/* vec_extractl */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extractl(
+    vector unsigned char __a, vector unsigned char __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextdubvrx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextdubvlx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extractl(
+    vector unsigned short __a, vector unsigned short __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextduhvrx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextduhvlx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extractl(
+    vector unsigned int __a, vector unsigned int __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextduwvrx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextduwvlx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_extractl(vector unsigned long long __a, vector unsigned long long __b,
+             unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextddvrx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextddvlx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+/* vec_extracth */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extracth(
+    vector unsigned char __a, vector unsigned char __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextdubvlx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextdubvrx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extracth(
+    vector unsigned short __a, vector unsigned short __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextduhvlx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextduhvrx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extracth(
+    vector unsigned int __a, vector unsigned int __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextduwvlx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextduwvrx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_extracth(vector unsigned long long __a, vector unsigned long long __b,
+             unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextddvlx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextddvrx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
 #ifdef __VSX__
 
 /* vec_permx */
diff --git a/clang/test/CodeGen/builtins-ppc-p10vector.c b/clang/test/CodeGen/builtins-ppc-p10vector.c
--- a/clang/test/CodeGen/builtins-ppc-p10vector.c
+++ b/clang/test/CodeGen/builtins-ppc-p10vector.c
@@ -569,6 +569,102 @@
   return vec_inserth(vuia, vuib, uia);
 }
 
+vector unsigned long long test_vec_extractl_uc(void) {
+  // CHECK-BE: @llvm.ppc.altivec.vextdubvlx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
+  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
+  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
+  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
+  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
+  // CHECK-BE: ret <2 x i64>
+  // CHECK-LE: @llvm.ppc.altivec.vextdubvrx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
+  // CHECK-LE-NEXT: ret <2 x i64>
+  return vec_extractl(vuca, vucb, uia);
+}
+
+vector unsigned long long test_vec_extractl_us(void) {
+  // CHECK-BE: @llvm.ppc.altivec.vextduhvlx(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, i32
+  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
+  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
+  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
+  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
+  // CHECK-BE: ret <2 x i64>
+  // CHECK-LE: @llvm.ppc.altivec.vextduhvrx(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, i32
+  // CHECK-LE-NEXT: ret <2 x i64>
+  return vec_extractl(vusa, vusb, uia);
+}
+
+vector unsigned long long test_vec_extractl_ui(void) {
+  // CHECK-BE: @llvm.ppc.altivec.vextduwvlx(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, i32
+  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
+  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
+  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
+  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
+  // CHECK-BE: ret <2 x i64>
+  // CHECK-LE: @llvm.ppc.altivec.vextduwvrx(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, i32
+  // CHECK-LE-NEXT: ret <2 x i64>
+  return vec_extractl(vuia, vuib, uia);
+}
+
+vector unsigned long long test_vec_extractl_ul(void) {
+  // CHECK-BE: @llvm.ppc.altivec.vextddvlx(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32
+  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
+  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
+  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
+  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
+  // CHECK-BE: ret <2 x i64>
+  // CHECK-LE: @llvm.ppc.altivec.vextddvrx(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32
+  // CHECK-LE-NEXT: ret <2 x i64>
+  return vec_extractl(vulla, vullb, uia);
+}
+
+vector unsigned long long test_vec_extracth_uc(void) {
+  // CHECK-BE: @llvm.ppc.altivec.vextdubvrx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
+  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
+  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
+  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
+  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
+  // CHECK-BE: ret <2 x i64>
+  // CHECK-LE: @llvm.ppc.altivec.vextdubvlx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
+  // CHECK-LE-NEXT: ret <2 x i64>
+  return vec_extracth(vuca, vucb, uia);
+}
+
+vector unsigned long long test_vec_extracth_us(void) {
+  // CHECK-BE: @llvm.ppc.altivec.vextduhvrx(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, i32
+  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
+  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
+  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
+  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
+  // CHECK-BE: ret <2 x i64>
+  // CHECK-LE: @llvm.ppc.altivec.vextduhvlx(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, i32
+  // CHECK-LE-NEXT: ret <2 x i64>
+  return vec_extracth(vusa, vusb, uia);
+}
+
+vector unsigned long long test_vec_extracth_ui(void) {
+  // CHECK-BE: @llvm.ppc.altivec.vextduwvrx(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, i32
+  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
+  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
+  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
+  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
+  // CHECK-BE: ret <2 x i64>
+  // CHECK-LE: @llvm.ppc.altivec.vextduwvlx(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, i32
+  // CHECK-LE-NEXT: ret <2 x i64>
+  return vec_extracth(vuia, vuib, uia);
+}
+
+vector unsigned long long test_vec_extracth_ul(void) {
+  // CHECK-BE: @llvm.ppc.altivec.vextddvrx(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32
+  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
+  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
+  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
+  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
+  // CHECK-BE: ret <2 x i64>
+  // CHECK-LE: @llvm.ppc.altivec.vextddvlx(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32
+  // CHECK-LE-NEXT: ret <2 x i64>
+  return vec_extracth(vulla, vullb, uia);
+}
+
 vector signed int test_vec_vec_splati_si(void) {
   // CHECK: ret <4 x i32>
   return vec_splati(-17);
diff --git a/llvm/include/llvm/IR/IntrinsicsPowerPC.td b/llvm/include/llvm/IR/IntrinsicsPowerPC.td
--- a/llvm/include/llvm/IR/IntrinsicsPowerPC.td
+++ b/llvm/include/llvm/IR/IntrinsicsPowerPC.td
@@ -531,6 +531,39 @@
       Intrinsic<[llvm_v2i64_ty],
                 [llvm_v2i64_ty, llvm_i64_ty, llvm_i32_ty],
                 [IntrNoMem, ImmArg<ArgIndex<2>>]>;
+  // P10 Vector Extract.
+  def int_ppc_altivec_vextdubvlx : GCCBuiltin<"__builtin_altivec_vextdubvlx">,
+      Intrinsic<[llvm_v2i64_ty],
+                [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+                [IntrNoMem]>;
+  def int_ppc_altivec_vextdubvrx : GCCBuiltin<"__builtin_altivec_vextdubvrx">,
+      Intrinsic<[llvm_v2i64_ty],
+                [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+                [IntrNoMem]>;
+  def int_ppc_altivec_vextduhvlx : GCCBuiltin<"__builtin_altivec_vextduhvlx">,
+      Intrinsic<[llvm_v2i64_ty],
+                [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
+                [IntrNoMem]>;
+  def int_ppc_altivec_vextduhvrx : GCCBuiltin<"__builtin_altivec_vextduhvrx">,
+      Intrinsic<[llvm_v2i64_ty],
+                [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
+                [IntrNoMem]>;
+  def int_ppc_altivec_vextduwvlx : GCCBuiltin<"__builtin_altivec_vextduwvlx">,
+      Intrinsic<[llvm_v2i64_ty],
+                [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
+                [IntrNoMem]>;
+  def int_ppc_altivec_vextduwvrx : GCCBuiltin<"__builtin_altivec_vextduwvrx">,
+      Intrinsic<[llvm_v2i64_ty],
+                [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
+                [IntrNoMem]>;
+  def int_ppc_altivec_vextddvlx : GCCBuiltin<"__builtin_altivec_vextddvlx">,
+      Intrinsic<[llvm_v2i64_ty],
+                [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
+                [IntrNoMem]>;
+  def int_ppc_altivec_vextddvrx : GCCBuiltin<"__builtin_altivec_vextddvrx">,
+      Intrinsic<[llvm_v2i64_ty],
+                [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
+                [IntrNoMem]>;
 }
 
 // Vector average.
diff --git a/llvm/lib/Target/PowerPC/PPCInstrPrefix.td b/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
--- a/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
@@ -948,37 +948,69 @@
                      (int_ppc_altivec_vinsdrx v2i64:$vDi, i64:$rA, i64:$rB))]>,
                     RegConstraint<"$vDi = $vD">, NoEncode<"$vDi">;
   def VEXTDUBVLX : VAForm_1a<24, (outs vrrc:$vD),
-                             (ins vrrc:$vA, vrrc:$vB, g8rc:$rC),
+                             (ins vrrc:$vA, vrrc:$vB, gprc:$rC),
                              "vextdubvlx $vD, $vA, $vB, $rC",
-                             IIC_VecGeneral, []>;
+                             IIC_VecGeneral,
+                             [(set v2i64:$vD,
+                                   (int_ppc_altivec_vextdubvlx v16i8:$vA,
+                                                               v16i8:$vB,
+                                                               i32:$rC))]>;
   def VEXTDUBVRX : VAForm_1a<25, (outs vrrc:$vD),
-                             (ins vrrc:$vA, vrrc:$vB, g8rc:$rC),
+                             (ins vrrc:$vA, vrrc:$vB, gprc:$rC),
                              "vextdubvrx $vD, $vA, $vB, $rC",
-                             IIC_VecGeneral, []>;
+                             IIC_VecGeneral,
+                             [(set v2i64:$vD,
+                                   (int_ppc_altivec_vextdubvrx v16i8:$vA,
+                                                               v16i8:$vB,
+                                                               i32:$rC))]>;
   def VEXTDUHVLX : VAForm_1a<26, (outs vrrc:$vD),
-                             (ins vrrc:$vA, vrrc:$vB, g8rc:$rC),
+                             (ins vrrc:$vA, vrrc:$vB, gprc:$rC),
                              "vextduhvlx $vD, $vA, $vB, $rC",
-                             IIC_VecGeneral, []>;
+                             IIC_VecGeneral,
+                             [(set v2i64:$vD,
+                                   (int_ppc_altivec_vextduhvlx v8i16:$vA,
+                                                               v8i16:$vB,
+                                                               i32:$rC))]>;
   def VEXTDUHVRX : VAForm_1a<27, (outs vrrc:$vD),
-                             (ins vrrc:$vA, vrrc:$vB, g8rc:$rC),
+                             (ins vrrc:$vA, vrrc:$vB, gprc:$rC),
                              "vextduhvrx $vD, $vA, $vB, $rC",
-                             IIC_VecGeneral, []>;
+                             IIC_VecGeneral,
+                             [(set v2i64:$vD,
+                                   (int_ppc_altivec_vextduhvrx v8i16:$vA,
+                                                               v8i16:$vB,
+                                                               i32:$rC))]>;
   def VEXTDUWVLX : VAForm_1a<28, (outs vrrc:$vD),
-                             (ins vrrc:$vA, vrrc:$vB, g8rc:$rC),
+                             (ins vrrc:$vA, vrrc:$vB, gprc:$rC),
                              "vextduwvlx $vD, $vA, $vB, $rC",
-                             IIC_VecGeneral, []>;
+                             IIC_VecGeneral,
+                             [(set v2i64:$vD,
+                                   (int_ppc_altivec_vextduwvlx v4i32:$vA,
+                                                               v4i32:$vB,
+                                                               i32:$rC))]>;
   def VEXTDUWVRX : VAForm_1a<29, (outs vrrc:$vD),
-                             (ins vrrc:$vA, vrrc:$vB, g8rc:$rC),
+                             (ins vrrc:$vA, vrrc:$vB, gprc:$rC),
                              "vextduwvrx $vD, $vA, $vB, $rC",
-                             IIC_VecGeneral, []>;
+                             IIC_VecGeneral,
+                             [(set v2i64:$vD,
+                                   (int_ppc_altivec_vextduwvrx v4i32:$vA,
+                                                               v4i32:$vB,
+                                                               i32:$rC))]>;
   def VEXTDDVLX : VAForm_1a<30, (outs vrrc:$vD),
-                            (ins vrrc:$vA, vrrc:$vB, g8rc:$rC),
+                            (ins vrrc:$vA, vrrc:$vB, gprc:$rC),
                             "vextddvlx $vD, $vA, $vB, $rC",
-                            IIC_VecGeneral, []>;
+                            IIC_VecGeneral,
+                            [(set v2i64:$vD,
+                                  (int_ppc_altivec_vextddvlx v2i64:$vA,
+                                                             v2i64:$vB,
+                                                             i32:$rC))]>;
   def VEXTDDVRX : VAForm_1a<31, (outs vrrc:$vD),
-                            (ins vrrc:$vA, vrrc:$vB, g8rc:$rC),
+                            (ins vrrc:$vA, vrrc:$vB, gprc:$rC),
                             "vextddvrx $vD, $vA, $vB, $rC",
-                            IIC_VecGeneral, []>;
+                            IIC_VecGeneral,
+                            [(set v2i64:$vD,
+                                  (int_ppc_altivec_vextddvrx v2i64:$vA,
+                                                             v2i64:$vB,
+                                                             i32:$rC))]>;
   def VPDEPD : VXForm_1<1485, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                "vpdepd $vD, $vA, $vB", IIC_VecGeneral,
                [(set v2i64:$vD,
diff --git a/llvm/test/CodeGen/PowerPC/builtins-ppc-p10permute.ll b/llvm/test/CodeGen/PowerPC/builtins-ppc-p10permute.ll
--- a/llvm/test/CodeGen/PowerPC/builtins-ppc-p10permute.ll
+++ b/llvm/test/CodeGen/PowerPC/builtins-ppc-p10permute.ll
@@ -253,3 +253,91 @@
   ret <2 x i64> %0
 }
 declare <2 x i64> @llvm.ppc.altivec.vinsd(<2 x i64>, i64, i32 immarg)
+
+define <2 x i64> @testVEXTDUBVLX(<16 x i8> %a, <16 x i8> %b, i32 %c) {
+; CHECK-LABEL: testVEXTDUBVLX:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextdubvlx v2, v2, v3, r7
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i64> @llvm.ppc.altivec.vextdubvlx(<16 x i8> %a, <16 x i8> %b, i32 %c)
+  ret <2 x i64> %0
+}
+declare <2 x i64> @llvm.ppc.altivec.vextdubvlx(<16 x i8>, <16 x i8>, i32)
+
+define <2 x i64> @testVEXTDUBVRX(<16 x i8> %a, <16 x i8> %b, i32 %c) {
+; CHECK-LABEL: testVEXTDUBVRX:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextdubvrx v2, v2, v3, r7
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i64> @llvm.ppc.altivec.vextdubvrx(<16 x i8> %a, <16 x i8> %b, i32 %c)
+  ret <2 x i64> %0
+}
+declare <2 x i64> @llvm.ppc.altivec.vextdubvrx(<16 x i8>, <16 x i8>, i32)
+
+define <2 x i64> @testVEXTDUHVLX(<8 x i16> %a, <8 x i16> %b, i32 %c) {
+; CHECK-LABEL: testVEXTDUHVLX:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextduhvlx v2, v2, v3, r7
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i64> @llvm.ppc.altivec.vextduhvlx(<8 x i16> %a, <8 x i16> %b, i32 %c)
+  ret <2 x i64> %0
+}
+declare <2 x i64> @llvm.ppc.altivec.vextduhvlx(<8 x i16>, <8 x i16>, i32)
+
+define <2 x i64> @testVEXTDUHVRX(<8 x i16> %a, <8 x i16> %b, i32 %c) {
+; CHECK-LABEL: testVEXTDUHVRX:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextduhvrx v2, v2, v3, r7
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i64> @llvm.ppc.altivec.vextduhvrx(<8 x i16> %a, <8 x i16> %b, i32 %c)
+  ret <2 x i64> %0
+}
+declare <2 x i64> @llvm.ppc.altivec.vextduhvrx(<8 x i16>, <8 x i16>, i32)
+
+define <2 x i64> @testVEXTDUWVLX(<4 x i32> %a, <4 x i32> %b, i32 %c) {
+; CHECK-LABEL: testVEXTDUWVLX:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextduwvlx v2, v2, v3, r7
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i64> @llvm.ppc.altivec.vextduwvlx(<4 x i32> %a, <4 x i32> %b, i32 %c)
+  ret <2 x i64> %0
+}
+declare <2 x i64> @llvm.ppc.altivec.vextduwvlx(<4 x i32>, <4 x i32>, i32)
+
+define <2 x i64> @testVEXTDUWVRX(<4 x i32> %a, <4 x i32> %b, i32 %c) {
+; CHECK-LABEL: testVEXTDUWVRX:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextduwvrx v2, v2, v3, r7
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i64> @llvm.ppc.altivec.vextduwvrx(<4 x i32> %a, <4 x i32> %b, i32 %c)
+  ret <2 x i64> %0
+}
+declare <2 x i64> @llvm.ppc.altivec.vextduwvrx(<4 x i32>, <4 x i32>, i32)
+
+define <2 x i64> @testVEXTDDVLX(<2 x i64> %a, <2 x i64> %b, i32 %c) {
+; CHECK-LABEL: testVEXTDDVLX:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextddvlx v2, v2, v3, r7
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i64> @llvm.ppc.altivec.vextddvlx(<2 x i64> %a, <2 x i64> %b, i32 %c)
+  ret <2 x i64> %0
+}
+declare <2 x i64> @llvm.ppc.altivec.vextddvlx(<2 x i64>, <2 x i64>, i32)
+
+define <2 x i64> @testVEXTDDVRX(<2 x i64> %a, <2 x i64> %b, i32 %c) {
+; CHECK-LABEL: testVEXTDDVRX:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vextddvrx v2, v2, v3, r7
+; CHECK-NEXT:    blr
+entry:
+  %0 = tail call <2 x i64> @llvm.ppc.altivec.vextddvrx(<2 x i64> %a, <2 x i64> %b, i32 %c)
+  ret <2 x i64> %0
+}
+declare <2 x i64> @llvm.ppc.altivec.vextddvrx(<2 x i64>, <2 x i64>, i32)