Index: clang/include/clang/Basic/BuiltinsPPC.def
===================================================================
--- clang/include/clang/Basic/BuiltinsPPC.def
+++ clang/include/clang/Basic/BuiltinsPPC.def
@@ -404,6 +404,7 @@
 // P8 Crypto built-ins.
 BUILTIN(__builtin_altivec_crypto_vsbox, "V2ULLiV2ULLi", "")
 BUILTIN(__builtin_altivec_crypto_vpermxor, "V16UcV16UcV16UcV16Uc", "")
+BUILTIN(__builtin_altivec_crypto_vpermxor_be, "V16UcV16UcV16UcV16Uc", "")
 BUILTIN(__builtin_altivec_crypto_vshasigmaw, "V4UiV4UiIiIi", "")
 BUILTIN(__builtin_altivec_crypto_vshasigmad, "V2ULLiV2ULLiIiIi", "")
 BUILTIN(__builtin_altivec_crypto_vcipher, "V2ULLiV2ULLiV2ULLi", "")
Index: llvm/include/llvm/IR/IntrinsicsPowerPC.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsPowerPC.td
+++ llvm/include/llvm/IR/IntrinsicsPowerPC.td
@@ -1087,6 +1087,10 @@
       GCCBuiltin<"__builtin_altivec_crypto_vpermxor">,
       Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
                  llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+  def int_ppc_altivec_crypto_vpermxor_be :
+      GCCBuiltin<"__builtin_altivec_crypto_vpermxor_be">,
+      Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+                 llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
   def int_ppc_altivec_crypto_vshasigmad :
       GCCBuiltin<"__builtin_altivec_crypto_vshasigmad">,
Index: llvm/lib/Target/PowerPC/PPCInstrVSX.td
===================================================================
--- llvm/lib/Target/PowerPC/PPCInstrVSX.td
+++ llvm/lib/Target/PowerPC/PPCInstrVSX.td
@@ -2491,11 +2491,16 @@
 // These Altivec patterns are here because we need a VSX instruction to match
 // the intrinsic (but only for little endian system).
-let Predicates = [HasVSX, IsLittleEndian, HasP8Altivec] in
+let Predicates = [HasVSX, IsLittleEndian, HasP8Altivec] in {
   def : Pat<(v16i8 (int_ppc_altivec_crypto_vpermxor v16i8:$a,
                                                     v16i8:$b, v16i8:$c)),
             (v16i8 (VPERMXOR $a, $b, (XXLNOR (COPY_TO_REGCLASS $c, VSRC),
                                              (COPY_TO_REGCLASS $c, VSRC))))>;
+  def : Pat<(v16i8 (int_ppc_altivec_crypto_vpermxor_be v16i8:$a,
+                                                       v16i8:$b, v16i8:$c)),
+            (v16i8 (VPERMXOR $a, $b, $c))>;
+} // HasVSX, IsLittleEndian, HasP8Altivec
+
 
 let Predicates = [HasVSX, IsBigEndian, HasP8Altivec] in
   def : Pat<(v16i8 (int_ppc_altivec_crypto_vpermxor v16i8:$a,
                                                     v16i8:$b, v16i8:$c)),
Index: llvm/test/CodeGen/PowerPC/crypto_bifs_be.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/PowerPC/crypto_bifs_be.ll
@@ -0,0 +1,94 @@
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
+; RUN:   -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
+; RUN:   -mcpu=pwr9 < %s | FileCheck %s
+
+; Function Attrs: nounwind
+define <16 x i8> @test_vpermxorb() #0 {
+entry:
+  %a = alloca <16 x i8>, align 16
+  %b = alloca <16 x i8>, align 16
+  %c = alloca <16 x i8>, align 16
+  store <16 x i8> , <16 x i8>* %a, align 16
+  store <16 x i8> , <16 x i8>* %b, align 16
+  store <16 x i8> , <16 x i8>* %c, align 16
+  %0 = load <16 x i8>, <16 x i8>* %a, align 16
+  %1 = load <16 x i8>, <16 x i8>* %b, align 16
+  %2 = load <16 x i8>, <16 x i8>* %c, align 16
+  %3 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor.be(<16 x i8> %0, <16 x i8> %1, <16 x i8> %2)
+  ret <16 x i8> %3
+; CHECK-NOT: xxlnor
+; CHECK: vpermxor 2,
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i8> @llvm.ppc.altivec.crypto.vpermxor.be(<16 x i8>, <16 x i8>, <16 x i8>) #1
+
+; Function Attrs: nounwind
+define <8 x i16> @test_vpermxorh() #0 {
+entry:
+  %a = alloca <8 x i16>, align 16
+  %b = alloca <8 x i16>, align 16
+  %c = alloca <8 x i16>, align 16
+  store <8 x i16> , <8 x i16>* %a, align 16
+  store <8 x i16> , <8 x i16>* %b, align 16
+  store <8 x i16> , <8 x i16>* %c, align 16
+  %0 = load <8 x i16>, <8 x i16>* %a, align 16
+  %1 = bitcast <8 x i16> %0 to <16 x i8>
+  %2 = load <8 x i16>, <8 x i16>* %b, align 16
+  %3 = bitcast <8 x i16> %2 to <16 x i8>
+  %4 = load <8 x i16>, <8 x i16>* %c, align 16
+  %5 = bitcast <8 x i16> %4 to <16 x i8>
+  %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor.be(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
+  %7 = bitcast <16 x i8> %6 to <8 x i16>
+  ret <8 x i16> %7
+; CHECK-NOT: xxlnor
+; CHECK: vpermxor 2,
+}
+
+; Function Attrs: nounwind
+define <4 x i32> @test_vpermxorw() #0 {
+entry:
+  %a = alloca <4 x i32>, align 16
+  %b = alloca <4 x i32>, align 16
+  %c = alloca <4 x i32>, align 16
+  store <4 x i32> , <4 x i32>* %a, align 16
+  store <4 x i32> , <4 x i32>* %b, align 16
+  store <4 x i32> , <4 x i32>* %c, align 16
+  %0 = load <4 x i32>, <4 x i32>* %a, align 16
+  %1 = bitcast <4 x i32> %0 to <16 x i8>
+  %2 = load <4 x i32>, <4 x i32>* %b, align 16
+  %3 = bitcast <4 x i32> %2 to <16 x i8>
+  %4 = load <4 x i32>, <4 x i32>* %c, align 16
+  %5 = bitcast <4 x i32> %4 to <16 x i8>
+  %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor.be(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
+  %7 = bitcast <16 x i8> %6 to <4 x i32>
+  ret <4 x i32> %7
+; CHECK-NOT: xxlnor
+; CHECK: vpermxor 2,
+}
+
+; Function Attrs: nounwind
+define <2 x i64> @test_vpermxord() #0 {
+entry:
+  %a = alloca <2 x i64>, align 16
+  %b = alloca <2 x i64>, align 16
+  %c = alloca <2 x i64>, align 16
+  store <2 x i64> , <2 x i64>* %a, align 16
+  store <2 x i64> , <2 x i64>* %b, align 16
+  store <2 x i64> , <2 x i64>* %c, align 16
+  %0 = load <2 x i64>, <2 x i64>* %a, align 16
+  %1 = bitcast <2 x i64> %0 to <16 x i8>
+  %2 = load <2 x i64>, <2 x i64>* %b, align 16
+  %3 = bitcast <2 x i64> %2 to <16 x i8>
+  %4 = load <2 x i64>, <2 x i64>* %c, align 16
+  %5 = bitcast <2 x i64> %4 to <16 x i8>
+  %6 = call <16 x i8> @llvm.ppc.altivec.crypto.vpermxor.be(<16 x i8> %1, <16 x i8> %3, <16 x i8> %5)
+  %7 = bitcast <16 x i8> %6 to <2 x i64>
+  ret <2 x i64> %7
+; CHECK-NOT: xxlnor
+; CHECK: vpermxor 2,
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }
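
As a rough usage sketch (the helper name and compile flags below are only
illustrative, not part of the patch), the new Clang builtin can be called
directly from C; its vector-unsigned-char signature is the
"V16UcV16UcV16UcV16Uc" entry added to BuiltinsPPC.def above, e.g. built with
clang -mcpu=pwr8 -maltivec on powerpc64le:

    /* Hypothetical helper, shown only to illustrate the builtin's signature. */
    __vector unsigned char
    permxor_be(__vector unsigned char a, __vector unsigned char b,
               __vector unsigned char mask) {
      /* Expected to lower to llvm.ppc.altivec.crypto.vpermxor.be and, per the
         PPCInstrVSX.td pattern above, select a single vpermxor on little-endian
         targets with no xxlnor of the mask (the plain vpermxor intrinsic keeps
         the mask complement there). */
      return __builtin_altivec_crypto_vpermxor_be(a, b, mask);
    }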