diff --git a/clang/lib/Headers/altivec.h b/clang/lib/Headers/altivec.h
--- a/clang/lib/Headers/altivec.h
+++ b/clang/lib/Headers/altivec.h
@@ -17213,6 +17213,56 @@
   return __builtin_vsx_xvtlsbb(__a, 0);
 }
 #endif /* __VSX__ */
+
+/* vs[l | r | ra] */
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_sl(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return __a << (__b %
+                 (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                            __CHAR_BIT__));
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_sl(vector signed __int128 __a, vector unsigned __int128 __b) {
+  // return (vector signed __int128)vec_sl((vector unsigned __int128)__a, __b);
+  return __a << (__b %
+                 (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                            __CHAR_BIT__));
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_sr(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return __a >> (__b %
+                 (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                            __CHAR_BIT__));
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_sr(vector signed __int128 __a, vector unsigned __int128 __b) {
+  // return (vector signed __int128)vec_sr((vector unsigned __int128)__a, __b);
+  return (vector signed __int128)(
+      ((vector unsigned __int128)__a) >>
+      (__b %
+       (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                  __CHAR_BIT__)));
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_sra(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  // return (vector unsigned __int128)vec_sra((vector signed __int128)__a, __b);
+  return (vector unsigned __int128)(
+      ((vector signed __int128)__a) >>
+      (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                        __CHAR_BIT__)));
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_sra(vector signed __int128 __a, vector unsigned __int128 __b) {
+  return __a >> (__b %
+                 (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                            __CHAR_BIT__));
+}
+
 #endif /* __POWER10_VECTOR__ */
 
 #undef __ATTRS_o_ai
diff --git a/clang/test/CodeGen/builtins-ppc-p10vector.c b/clang/test/CodeGen/builtins-ppc-p10vector.c
--- a/clang/test/CodeGen/builtins-ppc-p10vector.c
+++ b/clang/test/CodeGen/builtins-ppc-p10vector.c
@@ -17,6 +17,7 @@
 vector unsigned int vuia, vuib, vuic;
 vector signed long long vslla, vsllb;
 vector unsigned long long vulla, vullb, vullc;
+vector signed __int128 vi128a;
 vector unsigned __int128 vui128a, vui128b, vui128c;
 vector float vfa, vfb;
 vector double vda, vdb;
@@ -634,3 +635,45 @@
   // CHECK-NEXT: ret i32
   return vec_test_lsbb_all_zeros(vuca);
 }
+
+vector unsigned __int128 test_vec_slq_unsigned(void) {
+  // CHECK-LABEL: test_vec_slq_unsigned
+  // CHECK: shl <1 x i128> %{{.+}}, %{{.+}}
+  // CHECK: ret <1 x i128> %{{.+}}
+  return vec_sl(vui128a, vui128b);
+}
+
+vector signed __int128 test_vec_slq_signed(void) {
+  // CHECK-LABEL: test_vec_slq_signed
+  // CHECK: shl <1 x i128> %{{.+}}, %{{.+}}
+  // CHECK: ret <1 x i128>
+  return vec_sl(vi128a, vui128a);
+}
+
+vector unsigned __int128 test_vec_srq_unsigned(void) {
+  // CHECK-LABEL: test_vec_srq_unsigned
+  // CHECK: lshr <1 x i128> %{{.+}}, %{{.+}}
+  // CHECK: ret <1 x i128>
+  return vec_sr(vui128a, vui128b);
+}
+
+vector signed __int128 test_vec_srq_signed(void) {
+  // CHECK-LABEL: test_vec_srq_signed
+  // CHECK: lshr <1 x i128> %{{.+}}, %{{.+}}
+  // CHECK: ret <1 x i128>
+  return vec_sr(vi128a, vui128a);
+}
+
+vector unsigned __int128 test_vec_sraq_unsigned(void) {
+  // CHECK-LABEL: test_vec_sraq_unsigned
+  // CHECK: ashr <1 x i128> %{{.+}}, %{{.+}}
+  // CHECK: ret <1 x i128>
+  return vec_sra(vui128a, vui128b);
+}
+
+vector signed __int128 test_vec_sraq_signed(void) {
+  // CHECK-LABEL: test_vec_sraq_signed
+  // CHECK: ashr <1 x i128> %{{.+}}, %{{.+}}
+  // CHECK: ret <1 x i128>
+  return vec_sra(vi128a, vui128a);
+}
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1096,6 +1096,10 @@
       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
     }
+
+    if (Subtarget.isISA3_1()) {
+      setOperationAction(ISD::SRA, MVT::v1i128, Legal);
+    }
   }
 
   if (Subtarget.has64BitSupport())
diff --git a/llvm/lib/Target/PowerPC/PPCInstrPrefix.td b/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
--- a/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
@@ -1137,6 +1137,22 @@
             (EXTRACT_SUBREG (XVTLSBB (COPY_TO_REGCLASS $XB, VSRC)), sub_eq)>;
 }
 
+// Vector shift patterns for ISA 3.1.
+let Predicates = [IsISA3_1] in {
+  def : Pat<(v1i128 (shl v1i128:$VRA, v1i128:$VRB)),
+            (v1i128 (VSLQ v1i128:$VRA, v1i128:$VRB))>;
+  def : Pat<(v1i128 (PPCshl v1i128:$VRA, v1i128:$VRB)),
+            (v1i128 (VSLQ v1i128:$VRA, v1i128:$VRB))>;
+  def : Pat<(v1i128 (srl v1i128:$VRA, v1i128:$VRB)),
+            (v1i128 (VSRQ v1i128:$VRA, v1i128:$VRB))>;
+  def : Pat<(v1i128 (PPCsrl v1i128:$VRA, v1i128:$VRB)),
+            (v1i128 (VSRQ v1i128:$VRA, v1i128:$VRB))>;
+  def : Pat<(v1i128 (sra v1i128:$VRA, v1i128:$VRB)),
+            (v1i128 (VSRAQ v1i128:$VRA, v1i128:$VRB))>;
+  def : Pat<(v1i128 (PPCsra v1i128:$VRA, v1i128:$VRB)),
+            (v1i128 (VSRAQ v1i128:$VRA, v1i128:$VRB))>;
+}
+
 let AddedComplexity = 400, Predicates = [IsISA3_1] in {
   def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$rS, 0)), xoaddr:$src),
             (STXVRBX (COPY_TO_REGCLASS $rS, VSRC), xoaddr:$src)>;
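
For reference, a minimal usage sketch of the new 128-bit shift overloads (an illustration only, not part of the patch): it assumes a clang that carries this change and a Power10 target, e.g. clang -mcpu=pwr10 -c shift128.c, and the function and variable names are made up for the example.

#include <altivec.h>

// Shift each 128-bit element left; the shift amount is taken modulo 128,
// matching the header implementation above. With the new patterns this is
// expected to select vslq on Power10.
vector unsigned __int128 demo_shift_left(vector unsigned __int128 value,
                                         vector unsigned __int128 amount) {
  return vec_sl(value, amount);
}

// Arithmetic right shift preserves the sign of the signed 128-bit element;
// expected to select vsraq on Power10.
vector signed __int128 demo_shift_right_algebraic(vector signed __int128 value,
                                                  vector unsigned __int128 amount) {
  return vec_sra(value, amount);
}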