Index: lib/Target/PowerPC/PPCInstrAltivec.td
===================================================================
--- lib/Target/PowerPC/PPCInstrAltivec.td
+++ lib/Target/PowerPC/PPCInstrAltivec.td
@@ -970,7 +970,6 @@
                        [(set v2i64:$vD, (ctpop v2i64:$vB))]>;
 
 let isCommutable = 1 in {
-let AddedComplexity = 500 in {
 // FIXME: Use AddedComplexity > 400 to ensure these patterns match before the
 // VSX equivalents. We need to fix this up at some point. Two possible
 // solutions for this problem:
@@ -991,6 +990,5 @@
                        "vorc $vD, $vA, $vB", IIC_VecGeneral,
                        [(set v4i32:$vD, (or v4i32:$vA, (vnot_ppc v4i32:$vB)))]>;
-} // AddedComplexity = 500
 } // isCommutable
 } // end HasP8Altivec
 
Index: lib/Target/PowerPC/PPCInstrVSX.td
===================================================================
--- lib/Target/PowerPC/PPCInstrVSX.td
+++ lib/Target/PowerPC/PPCInstrVSX.td
@@ -940,3 +940,28 @@
 } // AddedComplexity
 } // HasVSX
 
+// The following VSX instructions were introduced in Power ISA 2.07
+/* FIXME: if the operands are v2i64, these patterns will not match.
+   we should define new patterns or otherwise match the same patterns
+   when the elements are larger than i32.
+*/
+def HasP8Vector : Predicate<"PPCSubTarget->hasP8Vector()">;
+let Predicates = [HasP8Vector] in {
+let AddedComplexity = 400 in { // Prefer VSX patterns over non-VSX patterns.
+let isCommutable = 1 in {
+  def XXLEQV : XX3Form<60, 186,
+                       (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+                       "xxleqv $XT, $XA, $XB", IIC_VecGeneral,
+                       [(set v4i32:$XT, (vnot_ppc (xor v4i32:$XA, v4i32:$XB)))]>;
+  def XXLNAND : XX3Form<60, 178,
+                        (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+                        "xxlnand $XT, $XA, $XB", IIC_VecGeneral,
+                        [(set v4i32:$XT, (vnot_ppc (and v4i32:$XA,
+                                                        v4i32:$XB)))]>;
+  } // isCommutable
+def XXLORC : XX3Form<60, 170,
+                     (outs vsrc:$XT), (ins vsrc:$XA, vsrc:$XB),
+                     "xxlorc $XT, $XA, $XB", IIC_VecGeneral,
+                     [(set v4i32:$XT, (or v4i32:$XA, (vnot_ppc v4i32:$XB)))]>;
+} // AddedComplexity = 400
+} // HasP8Vector
Index: test/CodeGen/PowerPC/vec_veqv_vnand_vorc.ll
===================================================================
--- test/CodeGen/PowerPC/vec_veqv_vnand_vorc.ll
+++ test/CodeGen/PowerPC/vec_veqv_vnand_vorc.ll
@@ -1,6 +1,5 @@
 ; Check the miscellaneous logical vector operations added in P8
 ;
-; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
 ; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
 ; Test x eqv y
 define <4 x i32> @test_veqv(<4 x i32> %x, <4 x i32> %y) nounwind {
Index: test/CodeGen/PowerPC/xxleqv_xxlnand_xxlorc.ll
===================================================================
--- test/CodeGen/PowerPC/xxleqv_xxlnand_xxlorc.ll
+++ test/CodeGen/PowerPC/xxleqv_xxlnand_xxlorc.ll
@@ -0,0 +1,52 @@
+; Check the miscellaneous logical vector operations added in P8
+;
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -march=ppc64le -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
+; Test x eqv y
+define <4 x i32> @test_xxleqv(<4 x i32> %x, <4 x i32> %y) nounwind {
+  %tmp = xor <4 x i32> %x, %y
+  %ret_val = xor <4 x i32> %tmp, < i32 -1, i32 -1, i32 -1, i32 -1>
+  ret <4 x i32> %ret_val
+; CHECK: xxleqv 34, 34, 35
+}
+
+; Test x xxlnand y
+define <4 x i32> @test_xxlnand(<4 x i32> %x, <4 x i32> %y) nounwind {
+  %tmp = and <4 x i32> %x, %y
+  %ret_val = xor <4 x i32> %tmp, < i32 -1, i32 -1, i32 -1, i32 -1>
+  ret <4 x i32> %ret_val
+; CHECK: xxlnand 34, 34, 35
+}
+
+; Test x xxlorc y
+define <4 x i32> @test_xxlorc(<4 x i32> %x, <4 x i32> %y) nounwind {
+  %tmp = xor <4 x i32> %y, < i32 -1, i32 -1, i32 -1, i32 -1>
+  %ret_val = or <4 x i32> %x, %tmp
+  ret <4 x i32> %ret_val
+; CHECK: xxlorc 34, 34, 35
+}
+
+; Test x eqv y
+define <8 x i16> @test_xxleqvv8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
+  %tmp = xor <8 x i16> %x, %y
+  %ret_val = xor <8 x i16> %tmp, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+  ret <8 x i16> %ret_val
+; CHECK: xxleqv 34, 34, 35
+}
+
+; Test x xxlnand y
+define <8 x i16> @test_xxlnandv8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
+  %tmp = and <8 x i16> %x, %y
+  %ret_val = xor <8 x i16> %tmp, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+  ret <8 x i16> %ret_val
+; CHECK: xxlnand 34, 34, 35
+}
+
+; Test x xxlorc y
+define <8 x i16> @test_xxlorcv8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
+  %tmp = xor <8 x i16> %y, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+  %ret_val = or <8 x i16> %x, %tmp
+  ret <8 x i16> %ret_val
+; CHECK: xxlorc 34, 34, 35
+}
+
Index: test/MC/Disassembler/PowerPC/vsx.txt
===================================================================
--- test/MC/Disassembler/PowerPC/vsx.txt
+++ test/MC/Disassembler/PowerPC/vsx.txt
@@ -404,6 +404,15 @@
 # CHECK: xxland 7, 63, 27
 0xf0 0xff 0xdc 0x14
 
+# CHECK: xxleqv 7, 63, 27
+0xf0 0xff 0xdd 0xd4
+
+# CHECK: xxlnand 7, 63, 27
+0xf0 0xff 0xdd 0x94
+
+# CHECK: xxlorc 7, 63, 27
+0xf0 0xff 0xdd 0x54
+
 # CHECK: xxlandc 7, 63, 27
 0xf0 0xff 0xdc 0x54
 
Index: test/MC/PowerPC/vsx.s
===================================================================
--- test/MC/PowerPC/vsx.s
+++ test/MC/PowerPC/vsx.s
@@ -403,6 +403,15 @@
 # CHECK-BE: xxland 7, 63, 27                # encoding: [0xf0,0xff,0xdc,0x14]
 # CHECK-LE: xxland 7, 63, 27                # encoding: [0x14,0xdc,0xff,0xf0]
             xxland 7, 63, 27
+# CHECK-BE: xxleqv 7, 63, 27                # encoding: [0xf0,0xff,0xdd,0xd4]
+# CHECK-LE: xxleqv 7, 63, 27                # encoding: [0xd4,0xdd,0xff,0xf0]
+            xxleqv 7, 63, 27
+# CHECK-BE: xxlnand 7, 63, 27               # encoding: [0xf0,0xff,0xdd,0x94]
+# CHECK-LE: xxlnand 7, 63, 27               # encoding: [0x94,0xdd,0xff,0xf0]
+            xxlnand 7, 63, 27
+# CHECK-BE: xxlorc 7, 63, 27                # encoding: [0xf0,0xff,0xdd,0x54]
+# CHECK-LE: xxlorc 7, 63, 27                # encoding: [0x54,0xdd,0xff,0xf0]
+            xxlorc 7, 63, 27
 # CHECK-BE: xxlandc 7, 63, 27               # encoding: [0xf0,0xff,0xdc,0x54]
 # CHECK-LE: xxlandc 7, 63, 27               # encoding: [0x54,0xdc,0xff,0xf0]
             xxlandc 7, 63, 27