diff --git a/clang/include/clang/Basic/BuiltinsPPC.def b/clang/include/clang/Basic/BuiltinsPPC.def --- a/clang/include/clang/Basic/BuiltinsPPC.def +++ b/clang/include/clang/Basic/BuiltinsPPC.def @@ -75,6 +75,8 @@ BUILTIN(__builtin_ppc_fctuwz, "dd", "") BUILTIN(__builtin_ppc_swdiv_nochk, "ddd", "") BUILTIN(__builtin_ppc_swdivs_nochk, "fff", "") +BUILTIN(__builtin_ppc_alignx, "vIivC*", "nc") +BUILTIN(__builtin_ppc_rdlam, "UWiUWiUWiUWIi", "nc") // Compare BUILTIN(__builtin_ppc_cmpeqb, "LLiLLiLLi", "") BUILTIN(__builtin_ppc_cmprb, "iCIiii", "") diff --git a/clang/lib/Basic/Targets/PPC.cpp b/clang/lib/Basic/Targets/PPC.cpp --- a/clang/lib/Basic/Targets/PPC.cpp +++ b/clang/lib/Basic/Targets/PPC.cpp @@ -168,6 +168,46 @@ Builder.defineMacro("__fres", "__builtin_ppc_fres"); Builder.defineMacro("__swdiv_nochk", "__builtin_ppc_swdiv_nochk"); Builder.defineMacro("__swdivs_nochk", "__builtin_ppc_swdivs_nochk"); + Builder.defineMacro("__alloca", "__builtin_alloca"); + Builder.defineMacro("__vcipher", "__builtin_altivec_crypto_vcipher"); + Builder.defineMacro("__vcipherlast", "__builtin_altivec_crypto_vcipherlast"); + Builder.defineMacro("__vncipher", "__builtin_altivec_crypto_vncipher"); + Builder.defineMacro("__vncipherlast", + "__builtin_altivec_crypto_vncipherlast"); + Builder.defineMacro("__vpermxor", "__builtin_altivec_crypto_vpermxor"); + Builder.defineMacro("__vpmsumb", "__builtin_altivec_crypto_vpmsumb"); + Builder.defineMacro("__vpmsumd", "__builtin_altivec_crypto_vpmsumd"); + Builder.defineMacro("__vpmsumh", "__builtin_altivec_crypto_vpmsumh"); + Builder.defineMacro("__vpmsumw", "__builtin_altivec_crypto_vpmsumw"); + Builder.defineMacro("__divde", "__builtin_divde"); + Builder.defineMacro("__divwe", "__builtin_divwe"); + Builder.defineMacro("__divdeu", "__builtin_divdeu"); + Builder.defineMacro("__divweu", "__builtin_divweu"); + Builder.defineMacro("__alignx", "__builtin_ppc_alignx"); + Builder.defineMacro("__bcopy", "bcopy"); + Builder.defineMacro("__bpermd", 
"__builtin_bpermd"); + Builder.defineMacro("__cntlz4", "__builtin_clz"); + Builder.defineMacro("__cntlz8", "__builtin_clzll"); + Builder.defineMacro("__cmplx", "__builtin_complex"); + Builder.defineMacro("__cmplxf", "__builtin_complex"); + Builder.defineMacro("__cnttz4", "__builtin_ctz"); + Builder.defineMacro("__cnttz8", "__builtin_ctzll"); + Builder.defineMacro("__darn", "__builtin_darn"); + Builder.defineMacro("__darn_32", "__builtin_darn_32"); + Builder.defineMacro("__darn_raw", "__builtin_darn_raw"); + Builder.defineMacro("__dcbf", "__builtin_dcbf"); + Builder.defineMacro("__fmadd", "__builtin_fma"); + Builder.defineMacro("__fmadds", "__builtin_fmaf"); + Builder.defineMacro("__labs", "__builtin_labs"); + Builder.defineMacro("__llabs", "__builtin_llabs"); + Builder.defineMacro("__popcnt4", "__builtin_popcount"); + Builder.defineMacro("__popcnt8", "__builtin_popcountll"); + Builder.defineMacro("__readflm", "__builtin_readflm"); + Builder.defineMacro("__rotatel4", "__builtin_rotateleft32"); + Builder.defineMacro("__rotatel8", "__builtin_rotateleft64"); + Builder.defineMacro("__rdlam", "__builtin_ppc_rdlam"); + Builder.defineMacro("__setflm", "__builtin_setflm"); + Builder.defineMacro("__setrnd", "__builtin_setrnd"); } /// PPCTargetInfo::getTargetDefines - Return a set of the PowerPC-specific diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -15293,7 +15293,24 @@ llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType); return Builder.CreateFDiv(One, Builder.CreateCall(F, X), "rsqrt"); } + case PPC::BI__builtin_ppc_alignx: { + ConstantInt *AlignmentCI = cast<ConstantInt>(Ops[0]); + if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment)) + AlignmentCI = ConstantInt::get(AlignmentCI->getType(), + llvm::Value::MaximumAlignment); + emitAlignmentAssumption(Ops[1], E->getArg(1), + /*The expr loc is sufficient.*/ SourceLocation(), + AlignmentCI, nullptr); + 
return Ops[1]; + } + case PPC::BI__builtin_ppc_rdlam: { + llvm::Type *Ty = Ops[0]->getType(); + Value *ShiftAmt = Builder.CreateIntCast(Ops[1], Ty, false); + Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty); + Value *Rotate = Builder.CreateCall(F, {Ops[0], Ops[0], ShiftAmt}); + return Builder.CreateAnd(Rotate, Ops[2]); + } // FMA variations case PPC::BI__builtin_vsx_xvmaddadp: case PPC::BI__builtin_vsx_xvmaddasp: diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp --- a/clang/lib/Sema/SemaChecking.cpp +++ b/clang/lib/Sema/SemaChecking.cpp @@ -3429,6 +3429,10 @@ case PPC::BI__builtin_ppc_mtfsfi: return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); + case PPC::BI__builtin_ppc_alignx: + return SemaBuiltinConstantArgPower2(TheCall, 0); + case PPC::BI__builtin_ppc_rdlam: + return SemaValueIsRunOfOnes(TheCall, 2); #define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \ case PPC::BI__builtin_##Name: \ return SemaBuiltinPPCMMACall(TheCall, Types); diff --git a/clang/test/CodeGen/builtins-ppc-xlcompat-cipher.c b/clang/test/CodeGen/builtins-ppc-xlcompat-cipher.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/builtins-ppc-xlcompat-cipher.c @@ -0,0 +1,91 @@ +// REQUIRES: powerpc-registered-target +// RUN: %clang_cc1 -triple powerpc64-unknown-unknown \ +// RUN: -emit-llvm %s -o - -target-cpu pwr8 | FileCheck %s +// RUN: %clang_cc1 -triple powerpc64le-unknown-unknown \ +// RUN: -emit-llvm %s -o - -target-cpu pwr8 | FileCheck %s +// RUN: %clang_cc1 -triple powerpc64-unknown-aix \ +// RUN: -emit-llvm %s -o - -target-cpu pwr8 | FileCheck %s +// RUN: %clang_cc1 -triple powerpc-unknown-unknown \ +// RUN: -emit-llvm %s -o - -target-cpu pwr8 | FileCheck %s +// RUN: %clang_cc1 -triple powerpcle-unknown-unknown \ +// RUN: -emit-llvm %s -o - -target-cpu pwr8 | FileCheck %s +// RUN: %clang_cc1 -triple powerpc-unknown-aix \ +// RUN: -emit-llvm %s -o - -target-cpu pwr8 | FileCheck %s + +// All of these 
cipher builtins are only for Power 8 and up. + +// CHECK-LABEL: @testvcipher( +// CHECK: [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.crypto.vcipher +// CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8> +// CHECK-NEXT: ret <16 x i8> [[TMP5]] +// +vector unsigned char testvcipher(vector unsigned char state_array, vector unsigned char round_key) { + return __vcipher(state_array, round_key); +} + +// CHECK-LABEL: @testvcipherlast( +// CHECK: [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.crypto.vcipherlast +// CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8> +// CHECK-NEXT: ret <16 x i8> [[TMP5]] +// +vector unsigned char testvcipherlast(vector unsigned char state_array, vector unsigned char round_key) { + return __vcipherlast(state_array, round_key); +} + +// CHECK-LABEL: @testvncipher( +// CHECK: [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.crypto.vncipher +// CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8> +// CHECK-NEXT: ret <16 x i8> [[TMP5]] +// +vector unsigned char testvncipher(vector unsigned char state_array, vector unsigned char round_key) { + return __vncipher(state_array, round_key); +} + +// CHECK-LABEL: @testvncipherlast( +// CHECK: [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.crypto.vncipherlast +// CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8> +// CHECK-NEXT: ret <16 x i8> [[TMP5]] +// +vector unsigned char testvncipherlast(vector unsigned char state_array, vector unsigned char round_key) { + return __vncipherlast(state_array, round_key); +} + +// CHECK-LABEL: @testvpermxor( +// CHECK: [[TMP3:%.*]] = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor +// CHECK-NEXT: ret <16 x i8> [[TMP3]] +// +vector unsigned char testvpermxor(vector unsigned char a, vector unsigned char b, vector unsigned char mask) { + return __vpermxor(a, b, mask); +} + +// CHECK-LABEL: @testvpmsumb( +// CHECK: [[TMP2:%.*]] = call <16 x i8> @llvm.ppc.altivec.crypto.vpmsumb +// CHECK-NEXT: ret <16 x 
i8> [[TMP2]] +// +vector unsigned char testvpmsumb(vector unsigned char a, vector unsigned char b) { + return __vpmsumb(a, b); +} + +// CHECK-LABEL: @testvpmsumd( +// CHECK: [[TMP2:%.*]] = call <2 x i64> @llvm.ppc.altivec.crypto.vpmsumd +// CHECK-NEXT: ret <2 x i64> [[TMP2]] +// +vector unsigned long long testvpmsumd(vector unsigned long long a, vector unsigned long long b) { + return __vpmsumd(a, b); +} + +// CHECK-LABEL: @testvpmsumh( +// CHECK: [[TMP2:%.*]] = call <8 x i16> @llvm.ppc.altivec.crypto.vpmsumh +// CHECK-NEXT: ret <8 x i16> [[TMP2]] +// +vector unsigned short testvpmsumh(vector unsigned short a, vector unsigned short b) { + return __vpmsumh(a, b); +} + +// CHECK-LABEL: @testvpmsumw( +// CHECK: [[TMP2:%.*]] = call <4 x i32> @llvm.ppc.altivec.crypto.vpmsumw +// CHECK-NEXT: ret <4 x i32> [[TMP2]] +// +vector unsigned int testvpmsumw(vector unsigned int a, vector unsigned int b) { + return __vpmsumw(a, b); +} diff --git a/clang/test/CodeGen/builtins-ppc-xlcompat-cmplx.c b/clang/test/CodeGen/builtins-ppc-xlcompat-cmplx.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/builtins-ppc-xlcompat-cmplx.c @@ -0,0 +1,228 @@ +// REQUIRES: powerpc-registered-target +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// RUN: %clang_cc1 -triple powerpc64-unknown-unknown \ +// RUN: -emit-llvm %s -o - -target-cpu pwr7 | FileCheck %s --check-prefix=64BIT +// RUN: %clang_cc1 -triple powerpc64le-unknown-unknown \ +// RUN: -emit-llvm %s -o - -target-cpu pwr8 | FileCheck %s --check-prefix=64BITLE +// RUN: %clang_cc1 -triple powerpc64-unknown-aix \ +// RUN: -emit-llvm %s -o - -target-cpu pwr7 | FileCheck %s --check-prefix=64BITAIX +// RUN: %clang_cc1 -triple powerpc-unknown-unknown \ +// RUN: -emit-llvm %s -o - -target-cpu pwr7 | FileCheck %s --check-prefix=32BIT +// RUN: %clang_cc1 -triple powerpcle-unknown-unknown \ +// RUN: -emit-llvm %s -o - -target-cpu pwr8 | FileCheck %s --check-prefix=32BITLE +// RUN: %clang_cc1 -triple 
powerpc-unknown-aix \ +// RUN: -emit-llvm %s -o - -target-cpu pwr7 | FileCheck %s --check-prefix=32BITAIX + +// 64BIT-LABEL: @testcmplx( +// 64BIT-NEXT: entry: +// 64BIT-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8 +// 64BIT-NEXT: [[REAL_ADDR:%.*]] = alloca double, align 8 +// 64BIT-NEXT: [[IMAG_ADDR:%.*]] = alloca double, align 8 +// 64BIT-NEXT: store double [[REAL:%.*]], double* [[REAL_ADDR]], align 8 +// 64BIT-NEXT: store double [[IMAG:%.*]], double* [[IMAG_ADDR]], align 8 +// 64BIT-NEXT: [[TMP0:%.*]] = load double, double* [[REAL_ADDR]], align 8 +// 64BIT-NEXT: [[TMP1:%.*]] = load double, double* [[IMAG_ADDR]], align 8 +// 64BIT-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[RETVAL]], i32 0, i32 0 +// 64BIT-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[RETVAL]], i32 0, i32 1 +// 64BIT-NEXT: store double [[TMP0]], double* [[RETVAL_REALP]], align 8 +// 64BIT-NEXT: store double [[TMP1]], double* [[RETVAL_IMAGP]], align 8 +// 64BIT-NEXT: [[TMP2:%.*]] = load { double, double }, { double, double }* [[RETVAL]], align 8 +// 64BIT-NEXT: ret { double, double } [[TMP2]] +// +// 64BITLE-LABEL: @testcmplx( +// 64BITLE-NEXT: entry: +// 64BITLE-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8 +// 64BITLE-NEXT: [[REAL_ADDR:%.*]] = alloca double, align 8 +// 64BITLE-NEXT: [[IMAG_ADDR:%.*]] = alloca double, align 8 +// 64BITLE-NEXT: store double [[REAL:%.*]], double* [[REAL_ADDR]], align 8 +// 64BITLE-NEXT: store double [[IMAG:%.*]], double* [[IMAG_ADDR]], align 8 +// 64BITLE-NEXT: [[TMP0:%.*]] = load double, double* [[REAL_ADDR]], align 8 +// 64BITLE-NEXT: [[TMP1:%.*]] = load double, double* [[IMAG_ADDR]], align 8 +// 64BITLE-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[RETVAL]], i32 0, i32 0 +// 64BITLE-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[RETVAL]], i32 0, 
i32 1 +// 64BITLE-NEXT: store double [[TMP0]], double* [[RETVAL_REALP]], align 8 +// 64BITLE-NEXT: store double [[TMP1]], double* [[RETVAL_IMAGP]], align 8 +// 64BITLE-NEXT: [[TMP2:%.*]] = load { double, double }, { double, double }* [[RETVAL]], align 8 +// 64BITLE-NEXT: ret { double, double } [[TMP2]] +// +// 64BITAIX-LABEL: @testcmplx( +// 64BITAIX-NEXT: entry: +// 64BITAIX-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 4 +// 64BITAIX-NEXT: [[REAL_ADDR:%.*]] = alloca double, align 8 +// 64BITAIX-NEXT: [[IMAG_ADDR:%.*]] = alloca double, align 8 +// 64BITAIX-NEXT: store double [[REAL:%.*]], double* [[REAL_ADDR]], align 8 +// 64BITAIX-NEXT: store double [[IMAG:%.*]], double* [[IMAG_ADDR]], align 8 +// 64BITAIX-NEXT: [[TMP0:%.*]] = load double, double* [[REAL_ADDR]], align 8 +// 64BITAIX-NEXT: [[TMP1:%.*]] = load double, double* [[IMAG_ADDR]], align 8 +// 64BITAIX-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[RETVAL]], i32 0, i32 0 +// 64BITAIX-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[RETVAL]], i32 0, i32 1 +// 64BITAIX-NEXT: store double [[TMP0]], double* [[RETVAL_REALP]], align 4 +// 64BITAIX-NEXT: store double [[TMP1]], double* [[RETVAL_IMAGP]], align 4 +// 64BITAIX-NEXT: [[TMP2:%.*]] = load { double, double }, { double, double }* [[RETVAL]], align 4 +// 64BITAIX-NEXT: ret { double, double } [[TMP2]] +// +// 32BIT-LABEL: @testcmplx( +// 32BIT-NEXT: entry: +// 32BIT-NEXT: [[REAL_ADDR:%.*]] = alloca double, align 8 +// 32BIT-NEXT: [[IMAG_ADDR:%.*]] = alloca double, align 8 +// 32BIT-NEXT: store double [[REAL:%.*]], double* [[REAL_ADDR]], align 8 +// 32BIT-NEXT: store double [[IMAG:%.*]], double* [[IMAG_ADDR]], align 8 +// 32BIT-NEXT: [[TMP0:%.*]] = load double, double* [[REAL_ADDR]], align 8 +// 32BIT-NEXT: [[TMP1:%.*]] = load double, double* [[IMAG_ADDR]], align 8 +// 32BIT-NEXT: [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds { double, double }, { 
double, double }* [[AGG_RESULT:%.*]], i32 0, i32 0 +// 32BIT-NEXT: [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT]], i32 0, i32 1 +// 32BIT-NEXT: store double [[TMP0]], double* [[AGG_RESULT_REALP]], align 8 +// 32BIT-NEXT: store double [[TMP1]], double* [[AGG_RESULT_IMAGP]], align 8 +// 32BIT-NEXT: [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT]], i32 0, i32 0 +// 32BIT-NEXT: [[AGG_RESULT_REAL:%.*]] = load double, double* [[AGG_RESULT_REALP1]], align 8 +// 32BIT-NEXT: [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT]], i32 0, i32 1 +// 32BIT-NEXT: [[AGG_RESULT_IMAG:%.*]] = load double, double* [[AGG_RESULT_IMAGP2]], align 8 +// 32BIT-NEXT: [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT]], i32 0, i32 0 +// 32BIT-NEXT: [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT]], i32 0, i32 1 +// 32BIT-NEXT: store double [[AGG_RESULT_REAL]], double* [[AGG_RESULT_REALP3]], align 8 +// 32BIT-NEXT: store double [[AGG_RESULT_IMAG]], double* [[AGG_RESULT_IMAGP4]], align 8 +// 32BIT-NEXT: ret void +// +// 32BITLE-LABEL: @testcmplx( +// 32BITLE-NEXT: entry: +// 32BITLE-NEXT: [[REAL_ADDR:%.*]] = alloca double, align 8 +// 32BITLE-NEXT: [[IMAG_ADDR:%.*]] = alloca double, align 8 +// 32BITLE-NEXT: store double [[REAL:%.*]], double* [[REAL_ADDR]], align 8 +// 32BITLE-NEXT: store double [[IMAG:%.*]], double* [[IMAG_ADDR]], align 8 +// 32BITLE-NEXT: [[TMP0:%.*]] = load double, double* [[REAL_ADDR]], align 8 +// 32BITLE-NEXT: [[TMP1:%.*]] = load double, double* [[IMAG_ADDR]], align 8 +// 32BITLE-NEXT: [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT:%.*]], i32 0, i32 0 +// 32BITLE-NEXT: [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds { double, double }, { 
double, double }* [[AGG_RESULT]], i32 0, i32 1 +// 32BITLE-NEXT: store double [[TMP0]], double* [[AGG_RESULT_REALP]], align 8 +// 32BITLE-NEXT: store double [[TMP1]], double* [[AGG_RESULT_IMAGP]], align 8 +// 32BITLE-NEXT: [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT]], i32 0, i32 0 +// 32BITLE-NEXT: [[AGG_RESULT_REAL:%.*]] = load double, double* [[AGG_RESULT_REALP1]], align 8 +// 32BITLE-NEXT: [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT]], i32 0, i32 1 +// 32BITLE-NEXT: [[AGG_RESULT_IMAG:%.*]] = load double, double* [[AGG_RESULT_IMAGP2]], align 8 +// 32BITLE-NEXT: [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT]], i32 0, i32 0 +// 32BITLE-NEXT: [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[AGG_RESULT]], i32 0, i32 1 +// 32BITLE-NEXT: store double [[AGG_RESULT_REAL]], double* [[AGG_RESULT_REALP3]], align 8 +// 32BITLE-NEXT: store double [[AGG_RESULT_IMAG]], double* [[AGG_RESULT_IMAGP4]], align 8 +// 32BITLE-NEXT: ret void +// +// 32BITAIX-LABEL: @testcmplx( +// 32BITAIX-NEXT: entry: +// 32BITAIX-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 4 +// 32BITAIX-NEXT: [[REAL_ADDR:%.*]] = alloca double, align 8 +// 32BITAIX-NEXT: [[IMAG_ADDR:%.*]] = alloca double, align 8 +// 32BITAIX-NEXT: store double [[REAL:%.*]], double* [[REAL_ADDR]], align 8 +// 32BITAIX-NEXT: store double [[IMAG:%.*]], double* [[IMAG_ADDR]], align 8 +// 32BITAIX-NEXT: [[TMP0:%.*]] = load double, double* [[REAL_ADDR]], align 8 +// 32BITAIX-NEXT: [[TMP1:%.*]] = load double, double* [[IMAG_ADDR]], align 8 +// 32BITAIX-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[RETVAL]], i32 0, i32 0 +// 32BITAIX-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { double, double }, { double, double }* [[RETVAL]], i32 0, i32 1 +// 
32BITAIX-NEXT: store double [[TMP0]], double* [[RETVAL_REALP]], align 4 +// 32BITAIX-NEXT: store double [[TMP1]], double* [[RETVAL_IMAGP]], align 4 +// 32BITAIX-NEXT: [[TMP2:%.*]] = load { double, double }, { double, double }* [[RETVAL]], align 4 +// 32BITAIX-NEXT: ret { double, double } [[TMP2]] +// +double _Complex testcmplx(double real, double imag) { + return __cmplx(real, imag); +} + +// 64BIT-LABEL: @testcmplxf( +// 64BIT-NEXT: entry: +// 64BIT-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4 +// 64BIT-NEXT: [[REAL_ADDR:%.*]] = alloca float, align 4 +// 64BIT-NEXT: [[IMAG_ADDR:%.*]] = alloca float, align 4 +// 64BIT-NEXT: store float [[REAL:%.*]], float* [[REAL_ADDR]], align 4 +// 64BIT-NEXT: store float [[IMAG:%.*]], float* [[IMAG_ADDR]], align 4 +// 64BIT-NEXT: [[TMP0:%.*]] = load float, float* [[REAL_ADDR]], align 4 +// 64BIT-NEXT: [[TMP1:%.*]] = load float, float* [[IMAG_ADDR]], align 4 +// 64BIT-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 0 +// 64BIT-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 1 +// 64BIT-NEXT: store float [[TMP0]], float* [[RETVAL_REALP]], align 4 +// 64BIT-NEXT: store float [[TMP1]], float* [[RETVAL_IMAGP]], align 4 +// 64BIT-NEXT: [[TMP2:%.*]] = load { float, float }, { float, float }* [[RETVAL]], align 4 +// 64BIT-NEXT: ret { float, float } [[TMP2]] +// +// 64BITLE-LABEL: @testcmplxf( +// 64BITLE-NEXT: entry: +// 64BITLE-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4 +// 64BITLE-NEXT: [[REAL_ADDR:%.*]] = alloca float, align 4 +// 64BITLE-NEXT: [[IMAG_ADDR:%.*]] = alloca float, align 4 +// 64BITLE-NEXT: store float [[REAL:%.*]], float* [[REAL_ADDR]], align 4 +// 64BITLE-NEXT: store float [[IMAG:%.*]], float* [[IMAG_ADDR]], align 4 +// 64BITLE-NEXT: [[TMP0:%.*]] = load float, float* [[REAL_ADDR]], align 4 +// 64BITLE-NEXT: [[TMP1:%.*]] = load float, float* [[IMAG_ADDR]], align 4 +// 
64BITLE-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 0 +// 64BITLE-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 1 +// 64BITLE-NEXT: store float [[TMP0]], float* [[RETVAL_REALP]], align 4 +// 64BITLE-NEXT: store float [[TMP1]], float* [[RETVAL_IMAGP]], align 4 +// 64BITLE-NEXT: [[TMP2:%.*]] = load { float, float }, { float, float }* [[RETVAL]], align 4 +// 64BITLE-NEXT: ret { float, float } [[TMP2]] +// +// 64BITAIX-LABEL: @testcmplxf( +// 64BITAIX-NEXT: entry: +// 64BITAIX-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4 +// 64BITAIX-NEXT: [[REAL_ADDR:%.*]] = alloca float, align 4 +// 64BITAIX-NEXT: [[IMAG_ADDR:%.*]] = alloca float, align 4 +// 64BITAIX-NEXT: store float [[REAL:%.*]], float* [[REAL_ADDR]], align 4 +// 64BITAIX-NEXT: store float [[IMAG:%.*]], float* [[IMAG_ADDR]], align 4 +// 64BITAIX-NEXT: [[TMP0:%.*]] = load float, float* [[REAL_ADDR]], align 4 +// 64BITAIX-NEXT: [[TMP1:%.*]] = load float, float* [[IMAG_ADDR]], align 4 +// 64BITAIX-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 0 +// 64BITAIX-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 1 +// 64BITAIX-NEXT: store float [[TMP0]], float* [[RETVAL_REALP]], align 4 +// 64BITAIX-NEXT: store float [[TMP1]], float* [[RETVAL_IMAGP]], align 4 +// 64BITAIX-NEXT: [[TMP2:%.*]] = load { float, float }, { float, float }* [[RETVAL]], align 4 +// 64BITAIX-NEXT: ret { float, float } [[TMP2]] +// +// 32BIT-LABEL: @testcmplxf( +// 32BIT-NEXT: entry: +// 32BIT-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4 +// 32BIT-NEXT: [[REAL_ADDR:%.*]] = alloca float, align 4 +// 32BIT-NEXT: [[IMAG_ADDR:%.*]] = alloca float, align 4 +// 32BIT-NEXT: store float [[REAL:%.*]], float* [[REAL_ADDR]], align 4 +// 32BIT-NEXT: store float [[IMAG:%.*]], float* 
[[IMAG_ADDR]], align 4 +// 32BIT-NEXT: [[TMP0:%.*]] = load float, float* [[REAL_ADDR]], align 4 +// 32BIT-NEXT: [[TMP1:%.*]] = load float, float* [[IMAG_ADDR]], align 4 +// 32BIT-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 0 +// 32BIT-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 1 +// 32BIT-NEXT: store float [[TMP0]], float* [[RETVAL_REALP]], align 4 +// 32BIT-NEXT: store float [[TMP1]], float* [[RETVAL_IMAGP]], align 4 +// 32BIT-NEXT: [[TMP2:%.*]] = bitcast { float, float }* [[RETVAL]] to i64* +// 32BIT-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP2]], align 4 +// 32BIT-NEXT: ret i64 [[TMP3]] +// +// 32BITLE-LABEL: @testcmplxf( +// 32BITLE-NEXT: entry: +// 32BITLE-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4 +// 32BITLE-NEXT: [[REAL_ADDR:%.*]] = alloca float, align 4 +// 32BITLE-NEXT: [[IMAG_ADDR:%.*]] = alloca float, align 4 +// 32BITLE-NEXT: store float [[REAL:%.*]], float* [[REAL_ADDR]], align 4 +// 32BITLE-NEXT: store float [[IMAG:%.*]], float* [[IMAG_ADDR]], align 4 +// 32BITLE-NEXT: [[TMP0:%.*]] = load float, float* [[REAL_ADDR]], align 4 +// 32BITLE-NEXT: [[TMP1:%.*]] = load float, float* [[IMAG_ADDR]], align 4 +// 32BITLE-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 0 +// 32BITLE-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 1 +// 32BITLE-NEXT: store float [[TMP0]], float* [[RETVAL_REALP]], align 4 +// 32BITLE-NEXT: store float [[TMP1]], float* [[RETVAL_IMAGP]], align 4 +// 32BITLE-NEXT: [[TMP2:%.*]] = bitcast { float, float }* [[RETVAL]] to i64* +// 32BITLE-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP2]], align 4 +// 32BITLE-NEXT: ret i64 [[TMP3]] +// +// 32BITAIX-LABEL: @testcmplxf( +// 32BITAIX-NEXT: entry: +// 32BITAIX-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4 +// 
32BITAIX-NEXT: [[REAL_ADDR:%.*]] = alloca float, align 4 +// 32BITAIX-NEXT: [[IMAG_ADDR:%.*]] = alloca float, align 4 +// 32BITAIX-NEXT: store float [[REAL:%.*]], float* [[REAL_ADDR]], align 4 +// 32BITAIX-NEXT: store float [[IMAG:%.*]], float* [[IMAG_ADDR]], align 4 +// 32BITAIX-NEXT: [[TMP0:%.*]] = load float, float* [[REAL_ADDR]], align 4 +// 32BITAIX-NEXT: [[TMP1:%.*]] = load float, float* [[IMAG_ADDR]], align 4 +// 32BITAIX-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 0 +// 32BITAIX-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds { float, float }, { float, float }* [[RETVAL]], i32 0, i32 1 +// 32BITAIX-NEXT: store float [[TMP0]], float* [[RETVAL_REALP]], align 4 +// 32BITAIX-NEXT: store float [[TMP1]], float* [[RETVAL_IMAGP]], align 4 +// 32BITAIX-NEXT: [[TMP2:%.*]] = load { float, float }, { float, float }* [[RETVAL]], align 4 +// 32BITAIX-NEXT: ret { float, float } [[TMP2]] +// +float _Complex testcmplxf(float real, float imag) { + return __cmplxf(real, imag); +} diff --git a/clang/test/CodeGen/builtins-ppc-xlcompat-darn.c b/clang/test/CodeGen/builtins-ppc-xlcompat-darn.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/builtins-ppc-xlcompat-darn.c @@ -0,0 +1,40 @@ +// REQUIRES: powerpc-registered-target +// RUN: %clang_cc1 -triple powerpc64-unknown-unknown \ +// RUN: -emit-llvm %s -o - -target-cpu pwr9 | FileCheck %s +// RUN: %clang_cc1 -triple powerpc64le-unknown-unknown \ +// RUN: -emit-llvm %s -o - -target-cpu pwr9 | FileCheck %s +// RUN: %clang_cc1 -triple powerpc64-unknown-aix \ +// RUN: -emit-llvm %s -o - -target-cpu pwr9 | FileCheck %s +// RUN: %clang_cc1 -triple powerpc-unknown-unknown \ +// RUN: -emit-llvm %s -o - -target-cpu pwr9 | FileCheck %s +// RUN: %clang_cc1 -triple powerpcle-unknown-unknown \ +// RUN: -emit-llvm %s -o - -target-cpu pwr9 | FileCheck %s +// RUN: %clang_cc1 -triple powerpc-unknown-aix \ +// RUN: -emit-llvm %s -o - -target-cpu pwr9 | FileCheck %s + 
+// The darn class of builtins are Power 9 and up and only darn_32 works in +// 32 bit mode. + +// CHECK-LABEL: @testdarn( +// CHECK: [[TMP0:%.*]] = call i64 @llvm.ppc.darn() +// CHECK-NEXT: ret i64 [[TMP0]] +// +long long testdarn(void) { + return __darn(); +} + +// CHECK-LABEL: @testdarn_raw( +// CHECK: [[TMP0:%.*]] = call i64 @llvm.ppc.darnraw() +// CHECK-NEXT: ret i64 [[TMP0]] +// +long long testdarn_raw(void) { + return __darn_raw(); +} + +// CHECK-LABEL: @testdarn_32( +// CHECK: [[TMP0:%.*]] = call i32 @llvm.ppc.darn32() +// CHECK-NEXT: ret i32 [[TMP0]] +// +int testdarn_32(void) { + return __darn_32(); +} diff --git a/clang/test/CodeGen/builtins-ppc-xlcompat-error.c b/clang/test/CodeGen/builtins-ppc-xlcompat-error.c --- a/clang/test/CodeGen/builtins-ppc-xlcompat-error.c +++ b/clang/test/CodeGen/builtins-ppc-xlcompat-error.c @@ -60,3 +60,36 @@ __mtfsfi(8, 0); //expected-error {{argument value 8 is outside the valid range [0, 7]}} __mtfsfi(5, 24); //expected-error {{argument value 24 is outside the valid range [0, 15]}} } + +unsigned long long testrdlam(unsigned long long rs, unsigned int shift, unsigned int not_const) { + // The third parameter is a mask that must be a constant that represents a + // contiguous bit field. + unsigned long long Return; + // Third parameter is not a constant. + Return = __rdlam(rs, shift, not_const); //expected-error {{argument to '__builtin_ppc_rdlam' must be a constant integer}} + // Third parameter is a constant but not a contiguous bit field. + return __rdlam(rs, shift, 0xF4) + Return; //expected-error {{argument 2 value should represent a contiguous bit field}} +} + +void testalignx(const void *pointer, unsigned int alignment) { + // The alignment must be an immediate. + __alignx(alignment, pointer); //expected-error {{argument to '__builtin_ppc_alignx' must be a constant integer}} + // The alignment must be a power of 2. 
+ __alignx(0x0, pointer); //expected-error {{argument should be a power of 2}} + // The alignment must be a power of 2. + __alignx(0xFF, pointer); //expected-error {{argument should be a power of 2}} +} + +#ifndef __PPC64__ +long long testbpermd(long long bit_selector, long long source) { + return __bpermd(bit_selector, source); //expected-error {{this builtin is only available on 64-bit targets}} +} + +long long testdivde(long long dividend, long long divisor) { + return __divde(dividend, divisor); //expected-error {{this builtin is only available on 64-bit targets}} +} + +unsigned long long testdivdeu(unsigned long long dividend, unsigned long long divisor) { + return __divdeu(dividend, divisor); //expected-error {{this builtin is only available on 64-bit targets}} +} +#endif diff --git a/clang/test/CodeGen/builtins-ppc-xlcompat-expect.c b/clang/test/CodeGen/builtins-ppc-xlcompat-expect.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/builtins-ppc-xlcompat-expect.c @@ -0,0 +1,31 @@ +// REQUIRES: powerpc-registered-target +// RUN: %clang_cc1 -triple powerpc64-unknown-unknown -O1 -disable-llvm-passes \ +// RUN: -emit-llvm %s -o - -target-cpu pwr7 | FileCheck %s --check-prefix=64BIT +// RUN: %clang_cc1 -triple powerpc64le-unknown-unknown -O1 -disable-llvm-passes \ +// RUN: -emit-llvm %s -o - -target-cpu pwr8 | FileCheck %s --check-prefix=64BIT +// RUN: %clang_cc1 -triple powerpc64-unknown-aix -O1 -disable-llvm-passes \ +// RUN: -emit-llvm %s -o - -target-cpu pwr7 | FileCheck %s --check-prefix=64BIT +// RUN: %clang_cc1 -triple powerpc-unknown-unknown -O1 -disable-llvm-passes \ +// RUN: -emit-llvm %s -o - -target-cpu pwr7 | FileCheck %s --check-prefix=32BIT +// RUN: %clang_cc1 -triple powerpcle-unknown-unknown -O1 -disable-llvm-passes \ +// RUN: -emit-llvm %s -o - -target-cpu pwr8 | FileCheck %s --check-prefix=32BIT +// RUN: %clang_cc1 -triple powerpc-unknown-aix -O1 -disable-llvm-passes \ +// RUN: -emit-llvm %s -o - -target-cpu pwr7 | FileCheck %s 
--check-prefix=32BIT + +// 64BIT-LABEL: @testbuiltin_expect( +// 64BIT: [[EXPVAL:%.*]] = call i64 @llvm.expect.i64(i64 {{%.*}}, i64 23) +// 64BIT-NEXT: [[CMP:%.*]] = icmp eq i64 [[EXPVAL]], 23 +// 64BIT-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32 +// 64BIT-NEXT: [[CONV1:%.*]] = sext i32 [[CONV]] to i64 +// 64BIT-NEXT: ret i64 [[CONV1]] +// +// 32BIT-LABEL: @testbuiltin_expect( +// 32BIT: [[EXPVAL:%.*]] = call i32 @llvm.expect.i32(i32 {{%.*}}, i32 23) +// 32BIT-NEXT: [[CMP:%.*]] = icmp eq i32 [[EXPVAL]], 23 +// 32BIT-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32 +// 32BIT-NEXT: ret i32 [[CONV]] +// +long testbuiltin_expect(long expression) { + // The second parameter is a long constant. + return __builtin_expect(expression, 23) == 23; +} diff --git a/clang/test/CodeGen/builtins-ppc-xlcompat-macros.c b/clang/test/CodeGen/builtins-ppc-xlcompat-macros.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/builtins-ppc-xlcompat-macros.c @@ -0,0 +1,214 @@ +// REQUIRES: powerpc-registered-target +// RUN: %clang_cc1 -triple powerpc64-unknown-unknown \ +// RUN: -emit-llvm %s -o - -target-cpu pwr7 | FileCheck %s --check-prefixes=64BIT --check-prefix=BOTH +// RUN: %clang_cc1 -triple powerpc64le-unknown-unknown \ +// RUN: -emit-llvm %s -o - -target-cpu pwr8 | FileCheck %s --check-prefixes=64BIT --check-prefix=BOTH +// RUN: %clang_cc1 -triple powerpc-unknown-aix \ +// RUN: -emit-llvm %s -o - -target-cpu pwr7 | FileCheck %s --check-prefixes=32BIT --check-prefix=BOTH +// RUN: %clang_cc1 -triple powerpc64-unknown-aix \ +// RUN: -emit-llvm %s -o - -target-cpu pwr7 | FileCheck %s --check-prefixes=64BIT --check-prefix=BOTH + +// Will not be adding include files to avoid any dependencies on the system. +// Required for size_t. Usually found in stddef.h. 
+typedef __SIZE_TYPE__ size_t; + +// 64BIT-LABEL: @testlabs( +// 64BIT-NEXT: entry: +// 64BIT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// 64BIT-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8 +// 64BIT-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8 +// 64BIT-NEXT: [[NEG:%.*]] = sub nsw i64 0, [[TMP0]] +// 64BIT-NEXT: [[ABSCOND:%.*]] = icmp slt i64 [[TMP0]], 0 +// 64BIT-NEXT: [[ABS:%.*]] = select i1 [[ABSCOND]], i64 [[NEG]], i64 [[TMP0]] +// 64BIT-NEXT: ret i64 [[ABS]] +// +// 32BIT-LABEL: @testlabs( +// 32BIT-NEXT: entry: +// 32BIT-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 +// 32BIT-NEXT: store i32 [[A:%.*]], i32* [[A_ADDR]], align 4 +// 32BIT-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 +// 32BIT-NEXT: [[NEG:%.*]] = sub nsw i32 0, [[TMP0]] +// 32BIT-NEXT: [[ABSCOND:%.*]] = icmp slt i32 [[TMP0]], 0 +// 32BIT-NEXT: [[ABS:%.*]] = select i1 [[ABSCOND]], i32 [[NEG]], i32 [[TMP0]] +// 32BIT-NEXT: ret i32 [[ABS]] +// +signed long testlabs(signed long a) { + return __labs(a); +} + +// 64BIT-LABEL: @testllabs( +// 64BIT-NEXT: entry: +// 64BIT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// 64BIT-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8 +// 64BIT-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8 +// 64BIT-NEXT: [[NEG:%.*]] = sub nsw i64 0, [[TMP0]] +// 64BIT-NEXT: [[ABSCOND:%.*]] = icmp slt i64 [[TMP0]], 0 +// 64BIT-NEXT: [[ABS:%.*]] = select i1 [[ABSCOND]], i64 [[NEG]], i64 [[TMP0]] +// 64BIT-NEXT: ret i64 [[ABS]] +// +// 32BIT-LABEL: @testllabs( +// 32BIT-NEXT: entry: +// 32BIT-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 +// 32BIT-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR]], align 8 +// 32BIT-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8 +// 32BIT-NEXT: [[NEG:%.*]] = sub nsw i64 0, [[TMP0]] +// 32BIT-NEXT: [[ABSCOND:%.*]] = icmp slt i64 [[TMP0]], 0 +// 32BIT-NEXT: [[ABS:%.*]] = select i1 [[ABSCOND]], i64 [[NEG]], i64 [[TMP0]] +// 32BIT-NEXT: ret i64 [[ABS]] +// +signed long long testllabs(signed long long a) { + return 
__llabs(a); +} + +// 64BIT-LABEL: @testalloca( +// 64BIT: [[TMP1:%.*]] = alloca i8, i64 +// 64BIT-NEXT: ret i8* [[TMP1]] +// +// 32BIT-LABEL: @testalloca( +// 32BIT: [[TMP1:%.*]] = alloca i8, i32 +// 32BIT-NEXT: ret i8* [[TMP1]] +// +void *testalloca(size_t size) { + return __alloca(size); +} + +// Note that bpermd is 64 bit only. +#ifdef __PPC64__ +// 64BIT-LABEL: @testbpermd( +// 64BIT: [[TMP:%.*]] = call i64 @llvm.ppc.bpermd(i64 {{%.*}}, i64 {{%.*}}) +// 64BIT-NEXT: ret i64 [[TMP]] +// +long long testbpermd(long long bit_selector, long long source) { + return __bpermd(bit_selector, source); +} +#endif + +#ifdef __PPC64__ +// 64BIT-LABEL: @testdivde( +// 64BIT: [[TMP2:%.*]] = call i64 @llvm.ppc.divde +// 64BIT-NEXT: ret i64 [[TMP2]] +long long testdivde(long long dividend, long long divisor) { + return __divde(dividend, divisor); +} + +// 64BIT-LABEL: @testdivdeu( +// 64BIT: [[TMP2:%.*]] = call i64 @llvm.ppc.divdeu +// 64BIT-NEXT: ret i64 [[TMP2]] +unsigned long long testdivdeu(unsigned long long dividend, unsigned long long divisor) { + return __divdeu(dividend, divisor); +} +#endif + +// 64BIT-LABEL: @testdivwe( +// 64BIT: [[TMP2:%.*]] = call i32 @llvm.ppc.divwe +// 64BIT-NEXT: ret i32 [[TMP2]] +// +// 32BIT-LABEL: @testdivwe( +// 32BIT: [[TMP2:%.*]] = call i32 @llvm.ppc.divwe +// 32BIT-NEXT: ret i32 [[TMP2]] +int testdivwe(int dividend, int divisor) { + return __divwe(dividend, divisor); +} + +// 64BIT-LABEL: @testdivweu( +// 64BIT: [[TMP2:%.*]] = call i32 @llvm.ppc.divweu +// 64BIT-NEXT: ret i32 [[TMP2]] +// +// 32BIT-LABEL: @testdivweu( +// 32BIT: [[TMP2:%.*]] = call i32 @llvm.ppc.divweu +// 32BIT-NEXT: ret i32 [[TMP2]] +unsigned int testdivweu(unsigned int dividend, unsigned int divisor) { + return __divweu(dividend, divisor); +} + +// BOTH-LABEL: @testfmadd( +// BOTH: [[TMP3:%.*]] = call double @llvm.fma.f64 +// BOTH-NEXT: ret double [[TMP3]] +// +double testfmadd(double a, double b, double c) { + return __fmadd(a, b, c); +} + +// BOTH-LABEL: @testfmadds( 
+// BOTH: [[TMP3:%.*]] = call float @llvm.fma.f32( +// BOTH-NEXT: ret float [[TMP3]] +// +float testfmadds(float a, float b, float c) { + return __fmadds(a, b, c); +} + +// Required for bzero and bcopy. Usually in strings.h. +extern void bcopy(const void *__src, void *__dest, size_t __n); +extern void bzero(void *__s, size_t __n); + +// 64BIT-LABEL: @testalignx( +// 64BIT: call void @llvm.assume(i1 true) [ "align"(i8* {{%.*}}, i64 16) ] +// 64BIT-NEXT: ret void +// +// 32BIT-LABEL: @testalignx( +// 32BIT: call void @llvm.assume(i1 true) [ "align"(i8* {{%.*}}, i32 16) ] +// 32BIT-NEXT: ret void +// +void testalignx(const void *pointer) { + __alignx(16, pointer); +} + +// 64BIT-LABEL: @testbcopy( +// 64BIT: call void @bcopy(i8* {{%.*}}, i8* {{%.*}}, i64 {{%.*}}) +// 64BIT-NEXT: ret void +// +// 32BIT-LABEL: @testbcopy( +// 32BIT: call void @bcopy(i8* {{%.*}}, i8* {{%.*}}, i32 {{%.*}}) +// 32BIT-NEXT: ret void +// +void testbcopy(const void *src, void *dest, size_t n) { + __bcopy(src, dest, n); +} + +// 64BIT-LABEL: @testbzero( +// 64BIT: call void @llvm.memset.p0i8.i64(i8* align 1 {{%.*}}, i8 0, i64 {{%.*}}, i1 false) +// 64BIT-NEXT: ret void +// +// 32BIT-LABEL: @testbzero( +// 32BIT: call void @llvm.memset.p0i8.i32(i8* align 1 {{%.*}}, i8 0, i32 {{%.*}}, i1 false) +// 32BIT-NEXT: ret void +// +void testbzero(void *s, size_t n) { + bzero(s, n); +} + +// 64BIT-LABEL: @testdcbf( +// 64BIT: call void @llvm.ppc.dcbf(i8* {{%.*}}) +// 64BIT-NEXT: ret void +// +// 32BIT-LABEL: @testdcbf( +// 32BIT: call void @llvm.ppc.dcbf(i8* {{%.*}}) +// 32BIT-NEXT: ret void +// +void testdcbf(const void *addr) { + __dcbf(addr); +} + +// BOTH-LABEL: @testreadflm( +// BOTH: [[TMP0:%.*]] = call double @llvm.ppc.readflm() +// BOTH-NEXT: ret double [[TMP0]] +// +double testreadflm(void) { + return __readflm(); +} + +// BOTH-LABEL: @testsetflm( +// BOTH: [[TMP1:%.*]] = call double @llvm.ppc.setflm(double {{%.*}}) +// BOTH-NEXT: ret double [[TMP1]] +// +double testsetflm(double a) { + return 
__setflm(a); +} + +// BOTH-LABEL: @testsetrnd( +// BOTH: [[TMP1:%.*]] = call double @llvm.ppc.setrnd(i32 {{%.*}}) +// BOTH-NEXT: ret double [[TMP1]] +// +double testsetrnd(int mode) { + return __setrnd(mode); +} diff --git a/clang/test/CodeGen/builtins-ppc-xlcompat-popcnt.c b/clang/test/CodeGen/builtins-ppc-xlcompat-popcnt.c --- a/clang/test/CodeGen/builtins-ppc-xlcompat-popcnt.c +++ b/clang/test/CodeGen/builtins-ppc-xlcompat-popcnt.c @@ -1,4 +1,4 @@ -// REQUIRES: powerpc-registered-target. +// REQUIRES: powerpc-registered-target // RUN: %clang_cc1 -triple powerpc64-unknown-unknown \ // RUN: -emit-llvm %s -o - -target-cpu pwr7 | FileCheck %s // RUN: %clang_cc1 -triple powerpc64le-unknown-unknown \ @@ -12,9 +12,7 @@ extern unsigned long long ull; // CHECK-LABEL: @test_builtin_ppc_poppar4( -// CHECK: [[TMP0:%.*]] = load i32, i32* @ui, align 4 -// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* @ui, align 4 -// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.ctpop.i32(i32 [[TMP1]]) +// CHECK: [[TMP2:%.*]] = call i32 @llvm.ctpop.i32(i32 {{.*}}) // CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], 1 // CHECK-NEXT: ret i32 [[TMP3]] // @@ -23,9 +21,7 @@ } // CHECK-LABEL: @test_builtin_ppc_poppar8( -// CHECK: [[TMP0:%.*]] = load i64, i64* @ull, align 8 -// CHECK-NEXT: [[TMP1:%.*]] = load i64, i64* @ull, align 8 -// CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP1]]) +// CHECK: [[TMP2:%.*]] = call i64 @llvm.ctpop.i64(i64 {{.*}}) // CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], 1 // CHECK-NEXT: [[CAST:%.*]] = trunc i64 [[TMP3]] to i32 // CHECK-NEXT: ret i32 [[CAST]] @@ -33,3 +29,54 @@ int test_builtin_ppc_poppar8() { return __builtin_ppc_poppar8(ull); } + +// CHECK-LABEL: @testcntlz4( +// CHECK: [[TMP:%.*]] = call i32 @llvm.ctlz.i32(i32 {{%.*}}, i1 false) +// CHECK-NEXT: ret i32 [[TMP]] +// +unsigned int testcntlz4(unsigned int value) { + return __cntlz4(value); +} + +// CHECK-LABEL: @testcntlz8( +// CHECK: [[TMP:%.*]] = call i64 @llvm.ctlz.i64(i64 {{%.*}}, i1 false) +// 
CHECK-NEXT: [[CAST:%.*]] = trunc i64 [[TMP]] to i32 +// CHECK-NEXT: ret i32 [[CAST]] +// +unsigned int testcntlz8(unsigned long long value) { + return __cntlz8(value); +} + +// CHECK-LABEL: @testcnttz4( +// CHECK: [[TMP:%.*]] = call i32 @llvm.cttz.i32(i32 {{%.*}}, i1 false) +// CHECK-NEXT: ret i32 [[TMP]] +// +unsigned int testcnttz4(unsigned int value) { + return __cnttz4(value); +} + +// CHECK-LABEL: @testcnttz8( +// CHECK: [[TMP:%.*]] = call i64 @llvm.cttz.i64(i64 {{%.*}}, i1 false) +// CHECK-NEXT: [[CAST:%.*]] = trunc i64 [[TMP]] to i32 +// CHECK-NEXT: ret i32 [[CAST]] +// +unsigned int testcnttz8(unsigned long long value) { + return __cnttz8(value); +} + +// CHECK-LABEL: @testpopcnt4( +// CHECK: [[TMP:%.*]] = call i32 @llvm.ctpop.i32(i32 {{%.*}}) +// CHECK-NEXT: ret i32 [[TMP]] +// +int testpopcnt4(unsigned int value) { + return __popcnt4(value); +} + +// CHECK-LABEL: @testpopcnt8( +// CHECK: [[TMP:%.*]] = call i64 @llvm.ctpop.i64(i64 {{%.*}}) +// CHECK-NEXT: [[CAST:%.*]] = trunc i64 [[TMP]] to i32 +// CHECK-NEXT: ret i32 [[CAST]] +// +int testpopcnt8(unsigned long long value) { + return __popcnt8(value); +} diff --git a/clang/test/CodeGen/builtins-ppc-xlcompat-rotate.c b/clang/test/CodeGen/builtins-ppc-xlcompat-rotate.c --- a/clang/test/CodeGen/builtins-ppc-xlcompat-rotate.c +++ b/clang/test/CodeGen/builtins-ppc-xlcompat-rotate.c @@ -55,3 +55,30 @@ /*shift = 31, mask = 0x1FF = 511*/ unsigned int res = __builtin_ppc_rlwnm(ui, 31, 0x1FF); } + +// CHECK-LABEL: @testrotatel4( +// CHECK: [[TMP:%.*]] = call i32 @llvm.fshl.i32(i32 {{%.*}}, i32 {{%.*}}, i32 {{%.*}}) +// CHECK-NEXT: ret i32 [[TMP]] +// +unsigned int testrotatel4(unsigned int rs, unsigned int shift) { + return __rotatel4(rs, shift); +} + +// CHECK-LABEL: @testrotatel8( +// CHECK: [[TMP:%.*]] = call i64 @llvm.fshl.i64(i64 {{%.*}}, i64 {{%.*}}, i64 {{%.*}}) +// CHECK-NEXT: ret i64 [[TMP]] +// +unsigned long long testrotatel8(unsigned long long rs, unsigned long long shift) { + return __rotatel8(rs, 
shift); +} + +// CHECK-LABEL: @testrdlam( +// CHECK: [[TMP0:%.*]] = call i64 @llvm.fshl.i64(i64 {{%.*}}, i64 {{%.*}}, i64 {{%.*}}) +// CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], 7 +// CHECK-NEXT: ret i64 [[TMP1]] +// +unsigned long long testrdlam(unsigned long long rs, unsigned int shift) { + // The third parameter is a mask that must be a constant that represents a + // contiguous bit field. + return __rdlam(rs, shift, 7); +}