diff --git a/clang/lib/Headers/altivec.h b/clang/lib/Headers/altivec.h
--- a/clang/lib/Headers/altivec.h
+++ b/clang/lib/Headers/altivec.h
@@ -17213,6 +17213,56 @@
   return __builtin_vsx_xvtlsbb(__a, 0);
 }
 #endif /* __VSX__ */
+
+/* vs[l | r | ra] */
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_sl(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return __a << (__b %
+                 (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                            __CHAR_BIT__));
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_sl(vector signed __int128 __a, vector unsigned __int128 __b) {
+  // return (vector signed __int128)vec_sl((vector unsigned __int128)__a, __b);
+  return __a << (__b %
+                 (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                            __CHAR_BIT__));
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_sr(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return __a >> (__b %
+                 (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                            __CHAR_BIT__));
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_sr(vector signed __int128 __a, vector unsigned __int128 __b) {
+  // return (vector signed __int128)vec_sr((vector unsigned __int128)__a, __b);
+  return (vector signed __int128)(
+      ((vector unsigned __int128)__a) >>
+      (__b % (vector unsigned __int128)(sizeof(
+                 unsigned __int128) *
+             __CHAR_BIT__)));
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_sra(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  // return (vector unsigned __int128)vec_sra((vector signed __int128)__a, __b);
+  return (vector unsigned __int128)(((vector signed __int128)__a) >> (__b %
+      (vector unsigned __int128)(sizeof(
+          unsigned __int128) *
+          __CHAR_BIT__)));
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_sra(vector signed __int128 __a, vector unsigned __int128 __b) {
+  return __a >> (__b %
+                 (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                            __CHAR_BIT__));
+}
+
 #endif /* __POWER10_VECTOR__ */
 
 #undef __ATTRS_o_ai
diff --git a/clang/test/CodeGen/builtins-ppc-p10vector.c b/clang/test/CodeGen/builtins-ppc-p10vector.c
--- a/clang/test/CodeGen/builtins-ppc-p10vector.c
+++ b/clang/test/CodeGen/builtins-ppc-p10vector.c
@@ -17,6 +17,7 @@
 vector unsigned int vuia, vuib, vuic;
 vector signed long long vslla, vsllb;
 vector unsigned long long vulla, vullb, vullc;
+vector signed __int128 vi128a;
 vector unsigned __int128 vui128a, vui128b, vui128c;
 vector float vfa, vfb;
 vector double vda, vdb;
@@ -634,3 +635,45 @@
   // CHECK-NEXT: ret i32
   return vec_test_lsbb_all_zeros(vuca);
 }
+
+vector unsigned __int128 test_vec_slq_unsigned (void) {
+  // CHECK-LABEL: test_vec_slq_unsigned
+  // CHECK: shl <1 x i128> %{{.+}}, %{{.+}}
+  // CHECK: ret <1 x i128> %{{.+}}
+  return vec_sl(vui128a, vui128b);
+}
+
+vector signed __int128 test_vec_slq_signed (void) {
+  // CHECK-LABEL: test_vec_slq_signed
+  // CHECK: shl <1 x i128> %{{.+}}, %{{.+}}
+  // CHECK: ret <1 x i128>
+  return vec_sl(vi128a, vui128a);
+}
+
+vector unsigned __int128 test_vec_srq_unsigned (void) {
+  // CHECK-LABEL: test_vec_srq_unsigned
+  // CHECK: lshr <1 x i128> %{{.+}}, %{{.+}}
+  // CHECK: ret <1 x i128>
+  return vec_sr(vui128a, vui128b);
+}
+
+vector signed __int128 test_vec_srq_signed (void) {
+  // CHECK-LABEL: test_vec_srq_signed
+  // CHECK: lshr <1 x i128> %{{.+}}, %{{.+}}
+  // CHECK: ret <1 x i128>
+  return vec_sr(vi128a, vui128a);
+}
+
+vector unsigned __int128 test_vec_sraq_unsigned (void) {
+  // CHECK-LABEL: test_vec_sraq_unsigned
+  // CHECK: ashr <1 x i128> %{{.+}}, %{{.+}}
+  // CHECK: ret <1 x i128>
+  return vec_sra(vui128a, vui128b);
+}
+
+vector signed __int128 test_vec_sraq_signed (void) {
+  // CHECK-LABEL: test_vec_sraq_signed
+  // CHECK: ashr <1 x i128> %{{.+}}, %{{.+}}
+  // CHECK: ret <1 x i128>
+  return vec_sra(vi128a, vui128a);
+}
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1096,6 +1096,10 @@
       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
     }
+
+    if (Subtarget.isISA3_1()) {
+      setOperationAction(ISD::SRA, MVT::v1i128, Legal);
+    }
   }
 
   if (Subtarget.has64BitSupport())
diff --git a/llvm/lib/Target/PowerPC/PPCInstrPrefix.td b/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
--- a/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrPrefix.td
@@ -1137,6 +1137,22 @@
             (EXTRACT_SUBREG (XVTLSBB (COPY_TO_REGCLASS $XB, VSRC)), sub_eq)>;
 }
 
+/* Vector shifts for ISA3_1 */
+let Predicates = [IsISA3_1] in {
+  def : Pat<(v1i128 (shl v1i128:$VRA, v1i128:$VRB)),
+            (v1i128 (VSLQ v1i128:$VRA, v1i128:$VRB))>;
+  def : Pat<(v1i128 (PPCshl v1i128:$VRA, v1i128:$VRB)),
+            (v1i128 (VSLQ v1i128:$VRA, v1i128:$VRB))>;
+  def : Pat<(v1i128 (srl v1i128:$VRA, v1i128:$VRB)),
+            (v1i128 (VSRQ v1i128:$VRA, v1i128:$VRB))>;
+  def : Pat<(v1i128 (PPCsrl v1i128:$VRA, v1i128:$VRB)),
+            (v1i128 (VSRQ v1i128:$VRA, v1i128:$VRB))>;
+  def : Pat<(v1i128 (sra v1i128:$VRA, v1i128:$VRB)),
+            (v1i128 (VSRAQ v1i128:$VRA, v1i128:$VRB))>;
+  def : Pat<(v1i128 (PPCsra v1i128:$VRA, v1i128:$VRB)),
+            (v1i128 (VSRAQ v1i128:$VRA, v1i128:$VRB))>;
+}
+
 let AddedComplexity = 400, Predicates = [IsISA3_1] in {
   def : Pat<(truncstorei8 (i32 (vector_extract v16i8:$rS, 0)), xoaddr:$src),
             (STXVRBX (COPY_TO_REGCLASS $rS, VSRC), xoaddr:$src)>;
diff --git a/llvm/test/CodeGen/PowerPC/p10-vector-shift.ll b/llvm/test/CodeGen/PowerPC/p10-vector-shift.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/p10-vector-shift.ll
@@ -0,0 +1,45 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
+; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
+; RUN:   FileCheck %s
+
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu \
+; RUN:   -mcpu=pwr10 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | \
+; RUN:   FileCheck %s
+
+; These tests ensure that the quadword vector shift instructions are
+; correctly exploited and selected during codegen.
+
+define dso_local <1 x i128> @test_vec_vslq(<1 x i128> %a, <1 x i128> %b) {
+; CHECK-LABEL: test_vec_vslq:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vslq v2, v2, v3
+; CHECK-NEXT:    blr
+entry:
+  %rem = urem <1 x i128> %b, <i128 128>
+  %shl = shl <1 x i128> %a, %rem
+  ret <1 x i128> %shl
+}
+
+define dso_local <1 x i128> @test_vec_vsrq(<1 x i128> %a, <1 x i128> %b) {
+; CHECK-LABEL: test_vec_vsrq:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsrq v2, v2, v3
+; CHECK-NEXT:    blr
+entry:
+  %rem = urem <1 x i128> %b, <i128 128>
+  %shr = lshr <1 x i128> %a, %rem
+  ret <1 x i128> %shr
+}
+
+define dso_local <1 x i128> @test_vec_vsraq(<1 x i128> %a, <1 x i128> %b) {
+; CHECK-LABEL: test_vec_vsraq:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsraq v2, v2, v3
+; CHECK-NEXT:    blr
+entry:
+  %rem = urem <1 x i128> %b, <i128 128>
+  %shr = ashr <1 x i128> %a, %rem
+  ret <1 x i128> %shr
+}
+
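For reference, a minimal C usage sketch (not part of the patch; the helper names are illustrative) showing how the new vec_sl/vec_sr/vec_sra quadword overloads added in altivec.h are expected to be used. When built for Power10 (e.g. clang -mcpu=pwr10 on a powerpc64le target), each call is expected to lower to the corresponding vslq/vsrq/vsraq instruction through the patterns added above; the overloads reduce the shift amount modulo 128 before shifting.

#include <altivec.h>

// Illustrative helpers (hypothetical names, not part of the patch).
vector unsigned __int128 shl_quad(vector unsigned __int128 a,
                                  vector unsigned __int128 b) {
  return vec_sl(a, b);  // expected to select vslq
}

vector unsigned __int128 lshr_quad(vector unsigned __int128 a,
                                   vector unsigned __int128 b) {
  return vec_sr(a, b);  // expected to select vsrq
}

vector signed __int128 ashr_quad(vector signed __int128 a,
                                 vector unsigned __int128 b) {
  return vec_sra(a, b); // expected to select vsraq
}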