Index: test/CodeGen/AArch64/unfold-masked-merge-vector-variablemask-const.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AArch64/unfold-masked-merge-vector-variablemask-const.ll
@@ -0,0 +1,232 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
+
+; ============================================================================ ;
+; Various cases with %x and/or %y being a constant
+; ============================================================================ ;
+
+define <4 x i32> @out_constant_varx_mone(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) {
+; CHECK-LABEL: out_constant_varx_mone:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    and v0.16b, v2.16b, v0.16b
+; CHECK-NEXT:    orn v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    ret
+  %notmask = xor <4 x i32> %mask, <i32 -1, i32 -1, i32 -1, i32 -1>
+  %mx = and <4 x i32> %mask, %x
+  %my = and <4 x i32> %notmask, <i32 -1, i32 -1, i32 -1, i32 -1>
+  %r = or <4 x i32> %mx, %my
+  ret <4 x i32> %r
+}
+
+define <4 x i32> @in_constant_varx_mone(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) {
+; CHECK-LABEL: in_constant_varx_mone:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    bic v0.16b, v2.16b, v0.16b
+; CHECK-NEXT:    mvn v0.16b, v0.16b
+; CHECK-NEXT:    ret
+  %n0 = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1> ; %x
+  %n1 = and <4 x i32> %n0, %mask
+  %r = xor <4 x i32> %n1, <i32 -1, i32 -1, i32 -1, i32 -1>
+  ret <4 x i32> %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define <4 x i32> @out_constant_varx_mone_invmask(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) {
+; CHECK-LABEL: out_constant_varx_mone_invmask:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    bic v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    ret
+  %notmask = xor <4 x i32> %mask, <i32 -1, i32 -1, i32 -1, i32 -1>
+  %mx = and <4 x i32> %notmask, %x
+  %my = and <4 x i32> %mask, <i32 -1, i32 -1, i32 -1, i32 -1>
+  %r = or <4 x i32> %mx, %my
+  ret <4 x i32> %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define <4 x i32> @in_constant_varx_mone_invmask(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) {
+; CHECK-LABEL: in_constant_varx_mone_invmask:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mvn v0.16b, v0.16b
+; CHECK-NEXT:    bic v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    mvn v0.16b, v0.16b
+; CHECK-NEXT:    ret
+  %notmask = xor <4 x i32> %mask, <i32 -1, i32 -1, i32 -1, i32 -1>
+  %n0 = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1> ; %x
+  %n1 = and <4 x i32> %n0, %notmask
+  %r = xor <4 x i32> %n1, <i32 -1, i32 -1, i32 -1, i32 -1>
+  ret <4 x i32> %r
+}
+
+define <4 x i32> @out_constant_varx_42(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) {
+; CHECK-LABEL: out_constant_varx_42:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v1.4s, #42
+; CHECK-NEXT:    bsl v2.16b, v0.16b, v1.16b
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
+  %notmask = xor <4 x i32> %mask, <i32 -1, i32 -1, i32 -1, i32 -1>
+  %mx = and <4 x i32> %mask, %x
+  %my = and <4 x i32> %notmask, <i32 42, i32 42, i32 42, i32 42>
+  %r = or <4 x i32> %mx, %my
+  ret <4 x i32> %r
+}
+
+define <4 x i32> @in_constant_varx_42(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) {
+; CHECK-LABEL: in_constant_varx_42:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v1.4s, #42
+; CHECK-NEXT:    eor v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    eor v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    ret
+  %n0 = xor <4 x i32> %x, <i32 42, i32 42, i32 42, i32 42> ; %x
+  %n1 = and <4 x i32> %n0, %mask
+  %r = xor <4 x i32> %n1, <i32 42, i32 42, i32 42, i32 42>
+  ret <4 x i32> %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define <4 x i32> @out_constant_varx_42_invmask(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) {
+; CHECK-LABEL: out_constant_varx_42_invmask:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    movi v1.4s, #42
+; CHECK-NEXT:    bsl v2.16b, v1.16b, v0.16b
+; CHECK-NEXT:    mov v0.16b, v2.16b
+; CHECK-NEXT:    ret
+  %notmask = xor <4 x i32> %mask, <i32 -1, i32 -1, i32 -1, i32 -1>
+  %mx = and <4 x i32> %notmask, %x
+  %my = and <4 x i32> %mask, <i32 42, i32 42, i32 42, i32 42>
+  %r = or <4 x i32> %mx, %my
+  ret <4 x i32> %r
+}
+
+; This is not a canonical form. Testing for completeness only.
+define <4 x i32> @in_constant_varx_42_invmask(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) { +; CHECK-LABEL: in_constant_varx_42_invmask: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v1.4s, #42 +; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b +; CHECK-NEXT: bic v0.16b, v0.16b, v2.16b +; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b +; CHECK-NEXT: ret + %notmask = xor <4 x i32> %mask, + %n0 = xor <4 x i32> %x, ; %x + %n1 = and <4 x i32> %n0, %notmask + %r = xor <4 x i32> %n1, + ret <4 x i32> %r +} + +define <4 x i32> @out_constant_mone_vary(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) { +; CHECK-LABEL: out_constant_mone_vary: +; CHECK: // %bb.0: +; CHECK-NEXT: bic v0.16b, v1.16b, v2.16b +; CHECK-NEXT: orr v0.16b, v2.16b, v0.16b +; CHECK-NEXT: ret + %notmask = xor <4 x i32> %mask, + %mx = and <4 x i32> %mask, + %my = and <4 x i32> %notmask, %y + %r = or <4 x i32> %mx, %my + ret <4 x i32> %r +} + +define <4 x i32> @in_constant_mone_vary(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) { +; CHECK-LABEL: in_constant_mone_vary: +; CHECK: // %bb.0: +; CHECK-NEXT: bic v0.16b, v2.16b, v1.16b +; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b +; CHECK-NEXT: ret + %n0 = xor <4 x i32> , %y ; %x + %n1 = and <4 x i32> %n0, %mask + %r = xor <4 x i32> %n1, %y + ret <4 x i32> %r +} + +; This is not a canonical form. Testing for completeness only. +define <4 x i32> @out_constant_mone_vary_invmask(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) { +; CHECK-LABEL: out_constant_mone_vary_invmask: +; CHECK: // %bb.0: +; CHECK-NEXT: and v0.16b, v2.16b, v1.16b +; CHECK-NEXT: orn v0.16b, v0.16b, v2.16b +; CHECK-NEXT: ret + %notmask = xor <4 x i32> %mask, + %mx = and <4 x i32> %notmask, + %my = and <4 x i32> %mask, %y + %r = or <4 x i32> %mx, %my + ret <4 x i32> %r +} + +; This is not a canonical form. Testing for completeness only. 
+define <4 x i32> @in_constant_mone_vary_invmask(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) { +; CHECK-LABEL: in_constant_mone_vary_invmask: +; CHECK: // %bb.0: +; CHECK-NEXT: mvn v0.16b, v1.16b +; CHECK-NEXT: bic v0.16b, v0.16b, v2.16b +; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b +; CHECK-NEXT: ret + %notmask = xor <4 x i32> %mask, + %n0 = xor <4 x i32> , %y ; %x + %n1 = and <4 x i32> %n0, %notmask + %r = xor <4 x i32> %n1, %y + ret <4 x i32> %r +} + +define <4 x i32> @out_constant_42_vary(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) { +; CHECK-LABEL: out_constant_42_vary: +; CHECK: // %bb.0: +; CHECK-NEXT: mov v0.16b, v2.16b +; CHECK-NEXT: movi v2.4s, #42 +; CHECK-NEXT: bsl v0.16b, v2.16b, v1.16b +; CHECK-NEXT: ret + %notmask = xor <4 x i32> %mask, + %mx = and <4 x i32> %mask, + %my = and <4 x i32> %notmask, %y + %r = or <4 x i32> %mx, %my + ret <4 x i32> %r +} + +define <4 x i32> @in_constant_42_vary(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) { +; CHECK-LABEL: in_constant_42_vary: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v0.4s, #42 +; CHECK-NEXT: eor v0.16b, v1.16b, v0.16b +; CHECK-NEXT: and v0.16b, v0.16b, v2.16b +; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b +; CHECK-NEXT: ret + %n0 = xor <4 x i32> , %y ; %x + %n1 = and <4 x i32> %n0, %mask + %r = xor <4 x i32> %n1, %y + ret <4 x i32> %r +} + +; This is not a canonical form. Testing for completeness only. +define <4 x i32> @out_constant_42_vary_invmask(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) { +; CHECK-LABEL: out_constant_42_vary_invmask: +; CHECK: // %bb.0: +; CHECK-NEXT: mov v0.16b, v2.16b +; CHECK-NEXT: movi v2.4s, #42 +; CHECK-NEXT: bsl v0.16b, v1.16b, v2.16b +; CHECK-NEXT: ret + %notmask = xor <4 x i32> %mask, + %mx = and <4 x i32> %notmask, + %my = and <4 x i32> %mask, %y + %r = or <4 x i32> %mx, %my + ret <4 x i32> %r +} + +; This is not a canonical form. Testing for completeness only. 
+define <4 x i32> @in_constant_42_vary_invmask(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) { +; CHECK-LABEL: in_constant_42_vary_invmask: +; CHECK: // %bb.0: +; CHECK-NEXT: movi v0.4s, #42 +; CHECK-NEXT: eor v0.16b, v1.16b, v0.16b +; CHECK-NEXT: bic v0.16b, v0.16b, v2.16b +; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b +; CHECK-NEXT: ret + %notmask = xor <4 x i32> %mask, + %n0 = xor <4 x i32> , %y ; %x + %n1 = and <4 x i32> %n0, %notmask + %r = xor <4 x i32> %n1, %y + ret <4 x i32> %r +} Index: test/CodeGen/AArch64/unfold-masked-merge-vector-variablemask.ll =================================================================== --- /dev/null +++ test/CodeGen/AArch64/unfold-masked-merge-vector-variablemask.ll @@ -0,0 +1,466 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s + +; https://bugs.llvm.org/show_bug.cgi?id=37104 + +; All the advanced stuff (negative tests, commutativity) is handled in the +; scalar version of the test only. 
+ +; ============================================================================ ; +; 8-bit vector width +; ============================================================================ ; + +define <1 x i8> @out_v1i8(<1 x i8> %x, <1 x i8> %y, <1 x i8> %mask) nounwind { +; CHECK-LABEL: out_v1i8: +; CHECK: // %bb.0: +; CHECK-NEXT: bsl v2.8b, v0.8b, v1.8b +; CHECK-NEXT: mov v0.16b, v2.16b +; CHECK-NEXT: ret + %mx = and <1 x i8> %x, %mask + %notmask = xor <1 x i8> %mask, + %my = and <1 x i8> %y, %notmask + %r = or <1 x i8> %mx, %my + ret <1 x i8> %r +} + +; ============================================================================ ; +; 16-bit vector width +; ============================================================================ ; + +define <2 x i8> @out_v2i8(<2 x i8> %x, <2 x i8> %y, <2 x i8> %mask) nounwind { +; CHECK-LABEL: out_v2i8: +; CHECK: // %bb.0: +; CHECK-NEXT: movi d3, #0x0000ff000000ff +; CHECK-NEXT: and v0.8b, v0.8b, v2.8b +; CHECK-NEXT: eor v2.8b, v2.8b, v3.8b +; CHECK-NEXT: and v1.8b, v1.8b, v2.8b +; CHECK-NEXT: orr v0.8b, v0.8b, v1.8b +; CHECK-NEXT: ret + %mx = and <2 x i8> %x, %mask + %notmask = xor <2 x i8> %mask, + %my = and <2 x i8> %y, %notmask + %r = or <2 x i8> %mx, %my + ret <2 x i8> %r +} + +define <1 x i16> @out_v1i16(<1 x i16> %x, <1 x i16> %y, <1 x i16> %mask) nounwind { +; CHECK-LABEL: out_v1i16: +; CHECK: // %bb.0: +; CHECK-NEXT: bsl v2.8b, v0.8b, v1.8b +; CHECK-NEXT: mov v0.16b, v2.16b +; CHECK-NEXT: ret + %mx = and <1 x i16> %x, %mask + %notmask = xor <1 x i16> %mask, + %my = and <1 x i16> %y, %notmask + %r = or <1 x i16> %mx, %my + ret <1 x i16> %r +} + +; ============================================================================ ; +; 32-bit vector width +; ============================================================================ ; + +define <4 x i8> @out_v4i8(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwind { +; CHECK-LABEL: out_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: movi d3, #0xff00ff00ff00ff +; CHECK-NEXT: and v0.8b, 
v0.8b, v2.8b +; CHECK-NEXT: eor v2.8b, v2.8b, v3.8b +; CHECK-NEXT: and v1.8b, v1.8b, v2.8b +; CHECK-NEXT: orr v0.8b, v0.8b, v1.8b +; CHECK-NEXT: ret + %mx = and <4 x i8> %x, %mask + %notmask = xor <4 x i8> %mask, + %my = and <4 x i8> %y, %notmask + %r = or <4 x i8> %mx, %my + ret <4 x i8> %r +} + +define <4 x i8> @out_v4i8_undef(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwind { +; CHECK-LABEL: out_v4i8_undef: +; CHECK: // %bb.0: +; CHECK-NEXT: movi d3, #0xff00ff00ff00ff +; CHECK-NEXT: and v0.8b, v0.8b, v2.8b +; CHECK-NEXT: eor v2.8b, v2.8b, v3.8b +; CHECK-NEXT: and v1.8b, v1.8b, v2.8b +; CHECK-NEXT: orr v0.8b, v0.8b, v1.8b +; CHECK-NEXT: ret + %mx = and <4 x i8> %x, %mask + %notmask = xor <4 x i8> %mask, + %my = and <4 x i8> %y, %notmask + %r = or <4 x i8> %mx, %my + ret <4 x i8> %r +} + +define <2 x i16> @out_v2i16(<2 x i16> %x, <2 x i16> %y, <2 x i16> %mask) nounwind { +; CHECK-LABEL: out_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: movi d3, #0x00ffff0000ffff +; CHECK-NEXT: and v0.8b, v0.8b, v2.8b +; CHECK-NEXT: eor v2.8b, v2.8b, v3.8b +; CHECK-NEXT: and v1.8b, v1.8b, v2.8b +; CHECK-NEXT: orr v0.8b, v0.8b, v1.8b +; CHECK-NEXT: ret + %mx = and <2 x i16> %x, %mask + %notmask = xor <2 x i16> %mask, + %my = and <2 x i16> %y, %notmask + %r = or <2 x i16> %mx, %my + ret <2 x i16> %r +} + +define <1 x i32> @out_v1i32(<1 x i32> %x, <1 x i32> %y, <1 x i32> %mask) nounwind { +; CHECK-LABEL: out_v1i32: +; CHECK: // %bb.0: +; CHECK-NEXT: bsl v2.8b, v0.8b, v1.8b +; CHECK-NEXT: mov v0.16b, v2.16b +; CHECK-NEXT: ret + %mx = and <1 x i32> %x, %mask + %notmask = xor <1 x i32> %mask, + %my = and <1 x i32> %y, %notmask + %r = or <1 x i32> %mx, %my + ret <1 x i32> %r +} + +; ============================================================================ ; +; 64-bit vector width +; ============================================================================ ; + +define <8 x i8> @out_v8i8(<8 x i8> %x, <8 x i8> %y, <8 x i8> %mask) nounwind { +; CHECK-LABEL: out_v8i8: +; CHECK: // %bb.0: 
+; CHECK-NEXT: bsl v2.8b, v0.8b, v1.8b +; CHECK-NEXT: mov v0.16b, v2.16b +; CHECK-NEXT: ret + %mx = and <8 x i8> %x, %mask + %notmask = xor <8 x i8> %mask, + %my = and <8 x i8> %y, %notmask + %r = or <8 x i8> %mx, %my + ret <8 x i8> %r +} + +define <4 x i16> @out_v4i16(<4 x i16> %x, <4 x i16> %y, <4 x i16> %mask) nounwind { +; CHECK-LABEL: out_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: bsl v2.8b, v0.8b, v1.8b +; CHECK-NEXT: mov v0.16b, v2.16b +; CHECK-NEXT: ret + %mx = and <4 x i16> %x, %mask + %notmask = xor <4 x i16> %mask, + %my = and <4 x i16> %y, %notmask + %r = or <4 x i16> %mx, %my + ret <4 x i16> %r +} + +define <4 x i16> @out_v4i16_undef(<4 x i16> %x, <4 x i16> %y, <4 x i16> %mask) nounwind { +; CHECK-LABEL: out_v4i16_undef: +; CHECK: // %bb.0: +; CHECK-NEXT: bsl v2.8b, v0.8b, v1.8b +; CHECK-NEXT: mov v0.16b, v2.16b +; CHECK-NEXT: ret + %mx = and <4 x i16> %x, %mask + %notmask = xor <4 x i16> %mask, + %my = and <4 x i16> %y, %notmask + %r = or <4 x i16> %mx, %my + ret <4 x i16> %r +} + +define <2 x i32> @out_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %mask) nounwind { +; CHECK-LABEL: out_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: bsl v2.8b, v0.8b, v1.8b +; CHECK-NEXT: mov v0.16b, v2.16b +; CHECK-NEXT: ret + %mx = and <2 x i32> %x, %mask + %notmask = xor <2 x i32> %mask, + %my = and <2 x i32> %y, %notmask + %r = or <2 x i32> %mx, %my + ret <2 x i32> %r +} + +define <1 x i64> @out_v1i64(<1 x i64> %x, <1 x i64> %y, <1 x i64> %mask) nounwind { +; CHECK-LABEL: out_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: bsl v2.8b, v0.8b, v1.8b +; CHECK-NEXT: mov v0.16b, v2.16b +; CHECK-NEXT: ret + %mx = and <1 x i64> %x, %mask + %notmask = xor <1 x i64> %mask, + %my = and <1 x i64> %y, %notmask + %r = or <1 x i64> %mx, %my + ret <1 x i64> %r +} + +; ============================================================================ ; +; 128-bit vector width +; ============================================================================ ; + +define <16 x i8> @out_v16i8(<16 x i8> %x, 
<16 x i8> %y, <16 x i8> %mask) nounwind { +; CHECK-LABEL: out_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: bsl v2.16b, v0.16b, v1.16b +; CHECK-NEXT: mov v0.16b, v2.16b +; CHECK-NEXT: ret + %mx = and <16 x i8> %x, %mask + %notmask = xor <16 x i8> %mask, + %my = and <16 x i8> %y, %notmask + %r = or <16 x i8> %mx, %my + ret <16 x i8> %r +} + +define <8 x i16> @out_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %mask) nounwind { +; CHECK-LABEL: out_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: bsl v2.16b, v0.16b, v1.16b +; CHECK-NEXT: mov v0.16b, v2.16b +; CHECK-NEXT: ret + %mx = and <8 x i16> %x, %mask + %notmask = xor <8 x i16> %mask, + %my = and <8 x i16> %y, %notmask + %r = or <8 x i16> %mx, %my + ret <8 x i16> %r +} + +define <4 x i32> @out_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) nounwind { +; CHECK-LABEL: out_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: bsl v2.16b, v0.16b, v1.16b +; CHECK-NEXT: mov v0.16b, v2.16b +; CHECK-NEXT: ret + %mx = and <4 x i32> %x, %mask + %notmask = xor <4 x i32> %mask, + %my = and <4 x i32> %y, %notmask + %r = or <4 x i32> %mx, %my + ret <4 x i32> %r +} + +define <4 x i32> @out_v4i32_undef(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) nounwind { +; CHECK-LABEL: out_v4i32_undef: +; CHECK: // %bb.0: +; CHECK-NEXT: bsl v2.16b, v0.16b, v1.16b +; CHECK-NEXT: mov v0.16b, v2.16b +; CHECK-NEXT: ret + %mx = and <4 x i32> %x, %mask + %notmask = xor <4 x i32> %mask, + %my = and <4 x i32> %y, %notmask + %r = or <4 x i32> %mx, %my + ret <4 x i32> %r +} + +define <2 x i64> @out_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %mask) nounwind { +; CHECK-LABEL: out_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: bsl v2.16b, v0.16b, v1.16b +; CHECK-NEXT: mov v0.16b, v2.16b +; CHECK-NEXT: ret + %mx = and <2 x i64> %x, %mask + %notmask = xor <2 x i64> %mask, + %my = and <2 x i64> %y, %notmask + %r = or <2 x i64> %mx, %my + ret <2 x i64> %r +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; Should be the same as the previous one. 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +; ============================================================================ ; +; 8-bit vector width +; ============================================================================ ; + +define <1 x i8> @in_v1i8(<1 x i8> %x, <1 x i8> %y, <1 x i8> %mask) nounwind { +; CHECK-LABEL: in_v1i8: +; CHECK: // %bb.0: +; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: and v0.8b, v0.8b, v2.8b +; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: ret + %n0 = xor <1 x i8> %x, %y + %n1 = and <1 x i8> %n0, %mask + %r = xor <1 x i8> %n1, %y + ret <1 x i8> %r +} + +; ============================================================================ ; +; 16-bit vector width +; ============================================================================ ; + +define <2 x i8> @in_v2i8(<2 x i8> %x, <2 x i8> %y, <2 x i8> %mask) nounwind { +; CHECK-LABEL: in_v2i8: +; CHECK: // %bb.0: +; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: and v0.8b, v0.8b, v2.8b +; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: ret + %n0 = xor <2 x i8> %x, %y + %n1 = and <2 x i8> %n0, %mask + %r = xor <2 x i8> %n1, %y + ret <2 x i8> %r +} + +define <1 x i16> @in_v1i16(<1 x i16> %x, <1 x i16> %y, <1 x i16> %mask) nounwind { +; CHECK-LABEL: in_v1i16: +; CHECK: // %bb.0: +; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: and v0.8b, v0.8b, v2.8b +; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: ret + %n0 = xor <1 x i16> %x, %y + %n1 = and <1 x i16> %n0, %mask + %r = xor <1 x i16> %n1, %y + ret <1 x i16> %r +} + +; ============================================================================ ; +; 32-bit vector width +; ============================================================================ ; + +define <4 x i8> @in_v4i8(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwind { +; CHECK-LABEL: in_v4i8: +; CHECK: // %bb.0: +; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: and v0.8b, v0.8b, v2.8b +; CHECK-NEXT: eor 
v0.8b, v0.8b, v1.8b +; CHECK-NEXT: ret + %n0 = xor <4 x i8> %x, %y + %n1 = and <4 x i8> %n0, %mask + %r = xor <4 x i8> %n1, %y + ret <4 x i8> %r +} + +define <2 x i16> @in_v2i16(<2 x i16> %x, <2 x i16> %y, <2 x i16> %mask) nounwind { +; CHECK-LABEL: in_v2i16: +; CHECK: // %bb.0: +; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: and v0.8b, v0.8b, v2.8b +; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: ret + %n0 = xor <2 x i16> %x, %y + %n1 = and <2 x i16> %n0, %mask + %r = xor <2 x i16> %n1, %y + ret <2 x i16> %r +} + +define <1 x i32> @in_v1i32(<1 x i32> %x, <1 x i32> %y, <1 x i32> %mask) nounwind { +; CHECK-LABEL: in_v1i32: +; CHECK: // %bb.0: +; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: and v0.8b, v0.8b, v2.8b +; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: ret + %n0 = xor <1 x i32> %x, %y + %n1 = and <1 x i32> %n0, %mask + %r = xor <1 x i32> %n1, %y + ret <1 x i32> %r +} + +; ============================================================================ ; +; 64-bit vector width +; ============================================================================ ; + +define <8 x i8> @in_v8i8(<8 x i8> %x, <8 x i8> %y, <8 x i8> %mask) nounwind { +; CHECK-LABEL: in_v8i8: +; CHECK: // %bb.0: +; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: and v0.8b, v0.8b, v2.8b +; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: ret + %n0 = xor <8 x i8> %x, %y + %n1 = and <8 x i8> %n0, %mask + %r = xor <8 x i8> %n1, %y + ret <8 x i8> %r +} + +define <4 x i16> @in_v4i16(<4 x i16> %x, <4 x i16> %y, <4 x i16> %mask) nounwind { +; CHECK-LABEL: in_v4i16: +; CHECK: // %bb.0: +; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: and v0.8b, v0.8b, v2.8b +; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: ret + %n0 = xor <4 x i16> %x, %y + %n1 = and <4 x i16> %n0, %mask + %r = xor <4 x i16> %n1, %y + ret <4 x i16> %r +} + +define <2 x i32> @in_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %mask) nounwind { +; CHECK-LABEL: in_v2i32: +; CHECK: // %bb.0: +; CHECK-NEXT: 
eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: and v0.8b, v0.8b, v2.8b +; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: ret + %n0 = xor <2 x i32> %x, %y + %n1 = and <2 x i32> %n0, %mask + %r = xor <2 x i32> %n1, %y + ret <2 x i32> %r +} + +define <1 x i64> @in_v1i64(<1 x i64> %x, <1 x i64> %y, <1 x i64> %mask) nounwind { +; CHECK-LABEL: in_v1i64: +; CHECK: // %bb.0: +; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: and v0.8b, v0.8b, v2.8b +; CHECK-NEXT: eor v0.8b, v0.8b, v1.8b +; CHECK-NEXT: ret + %n0 = xor <1 x i64> %x, %y + %n1 = and <1 x i64> %n0, %mask + %r = xor <1 x i64> %n1, %y + ret <1 x i64> %r +} + +; ============================================================================ ; +; 128-bit vector width +; ============================================================================ ; + +define <16 x i8> @in_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %mask) nounwind { +; CHECK-LABEL: in_v16i8: +; CHECK: // %bb.0: +; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b +; CHECK-NEXT: and v0.16b, v0.16b, v2.16b +; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b +; CHECK-NEXT: ret + %n0 = xor <16 x i8> %x, %y + %n1 = and <16 x i8> %n0, %mask + %r = xor <16 x i8> %n1, %y + ret <16 x i8> %r +} + +define <8 x i16> @in_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %mask) nounwind { +; CHECK-LABEL: in_v8i16: +; CHECK: // %bb.0: +; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b +; CHECK-NEXT: and v0.16b, v0.16b, v2.16b +; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b +; CHECK-NEXT: ret + %n0 = xor <8 x i16> %x, %y + %n1 = and <8 x i16> %n0, %mask + %r = xor <8 x i16> %n1, %y + ret <8 x i16> %r +} + +define <4 x i32> @in_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) nounwind { +; CHECK-LABEL: in_v4i32: +; CHECK: // %bb.0: +; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b +; CHECK-NEXT: and v0.16b, v0.16b, v2.16b +; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b +; CHECK-NEXT: ret + %n0 = xor <4 x i32> %x, %y + %n1 = and <4 x i32> %n0, %mask + %r = xor <4 x i32> %n1, %y + ret <4 x i32> %r +} + +define <2 x i64> 
@in_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %mask) nounwind { +; CHECK-LABEL: in_v2i64: +; CHECK: // %bb.0: +; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b +; CHECK-NEXT: and v0.16b, v0.16b, v2.16b +; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b +; CHECK-NEXT: ret + %n0 = xor <2 x i64> %x, %y + %n1 = and <2 x i64> %n0, %mask + %r = xor <2 x i64> %n1, %y + ret <2 x i64> %r +} Index: test/CodeGen/X86/machine-cp.ll =================================================================== --- test/CodeGen/X86/machine-cp.ll +++ test/CodeGen/X86/machine-cp.ll @@ -10,7 +10,7 @@ ; CHECK-NEXT: testl %esi, %esi ; CHECK-NEXT: je LBB0_1 ; CHECK-NEXT: ## %bb.2: ## %while.body.preheader -; CHECK-NEXT: movl %esi, %edx +; CHECK-NEXT: movl %esi, %edx ; CHECK-NEXT: .p2align 4, 0x90 ; CHECK-NEXT: LBB0_3: ## %while.body ; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1 Index: test/CodeGen/X86/unfold-masked-merge-vector-variablemask-const.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/unfold-masked-merge-vector-variablemask-const.ll @@ -0,0 +1,490 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,-sse2 < %s | FileCheck %s --check-prefix=CHECK-SSE +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,+sse2 < %s | FileCheck %s --check-prefix=CHECK-SSE2 + +; ============================================================================ ; +; Various cases with %x and/or %y being a constant +; ============================================================================ ; + +define <4 x i32> @out_constant_varx_mone(<4 x i32> *%px, <4 x i32> *%py, <4 x i32> *%pmask) { +; CHECK-SSE-LABEL: out_constant_varx_mone: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movaps (%rcx), %xmm0 +; CHECK-SSE-NEXT: movaps {{.*#+}} xmm1 = [nan,nan,nan,nan] +; CHECK-SSE-NEXT: xorps %xmm0, %xmm1 +; CHECK-SSE-NEXT: andps (%rsi), %xmm0 +; CHECK-SSE-NEXT: orps %xmm1, %xmm0 +; 
CHECK-SSE-NEXT: movaps %xmm0, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_constant_varx_mone: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: movdqa (%rdx), %xmm0 +; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm1 +; CHECK-SSE2-NEXT: pxor %xmm0, %xmm1 +; CHECK-SSE2-NEXT: pand (%rdi), %xmm0 +; CHECK-SSE2-NEXT: por %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %x = load <4 x i32>, <4 x i32> *%px, align 16 + %y = load <4 x i32>, <4 x i32> *%py, align 16 + %mask = load <4 x i32>, <4 x i32> *%pmask, align 16 + %notmask = xor <4 x i32> %mask, + %mx = and <4 x i32> %mask, %x + %my = and <4 x i32> %notmask, + %r = or <4 x i32> %mx, %my + ret <4 x i32> %r +} + +define <4 x i32> @in_constant_varx_mone(<4 x i32> *%px, <4 x i32> *%py, <4 x i32> *%pmask) { +; CHECK-SSE-LABEL: in_constant_varx_mone: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movaps (%rsi), %xmm0 +; CHECK-SSE-NEXT: andnps (%rcx), %xmm0 +; CHECK-SSE-NEXT: xorps {{.*}}(%rip), %xmm0 +; CHECK-SSE-NEXT: movaps %xmm0, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_constant_varx_mone: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: movdqa (%rdi), %xmm0 +; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm1 +; CHECK-SSE2-NEXT: pandn (%rdx), %xmm0 +; CHECK-SSE2-NEXT: pxor %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %x = load <4 x i32>, <4 x i32> *%px, align 16 + %y = load <4 x i32>, <4 x i32> *%py, align 16 + %mask = load <4 x i32>, <4 x i32> *%pmask, align 16 + %n0 = xor <4 x i32> %x, ; %x + %n1 = and <4 x i32> %n0, %mask + %r = xor <4 x i32> %n1, + ret <4 x i32> %r +} + +; This is not a canonical form. Testing for completeness only. 
+define <4 x i32> @out_constant_varx_mone_invmask(<4 x i32> *%px, <4 x i32> *%py, <4 x i32> *%pmask) { +; CHECK-SSE-LABEL: out_constant_varx_mone_invmask: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movaps (%rcx), %xmm0 +; CHECK-SSE-NEXT: movaps %xmm0, %xmm1 +; CHECK-SSE-NEXT: andnps (%rsi), %xmm1 +; CHECK-SSE-NEXT: orps %xmm0, %xmm1 +; CHECK-SSE-NEXT: movaps %xmm1, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_constant_varx_mone_invmask: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: movaps (%rdx), %xmm1 +; CHECK-SSE2-NEXT: movaps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: andnps (%rdi), %xmm0 +; CHECK-SSE2-NEXT: orps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %x = load <4 x i32>, <4 x i32> *%px, align 16 + %y = load <4 x i32>, <4 x i32> *%py, align 16 + %mask = load <4 x i32>, <4 x i32> *%pmask, align 16 + %notmask = xor <4 x i32> %mask, + %mx = and <4 x i32> %notmask, %x + %my = and <4 x i32> %mask, + %r = or <4 x i32> %mx, %my + ret <4 x i32> %r +} + +; This is not a canonical form. Testing for completeness only. 
+define <4 x i32> @in_constant_varx_mone_invmask(<4 x i32> *%px, <4 x i32> *%py, <4 x i32> *%pmask) { +; CHECK-SSE-LABEL: in_constant_varx_mone_invmask: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movaps (%rsi), %xmm0 +; CHECK-SSE-NEXT: movaps {{.*#+}} xmm1 = [nan,nan,nan,nan] +; CHECK-SSE-NEXT: movaps (%rcx), %xmm2 +; CHECK-SSE-NEXT: xorps %xmm1, %xmm2 +; CHECK-SSE-NEXT: andnps %xmm2, %xmm0 +; CHECK-SSE-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE-NEXT: movaps %xmm0, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_constant_varx_mone_invmask: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: movdqa (%rdi), %xmm0 +; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm1 +; CHECK-SSE2-NEXT: movdqa (%rdx), %xmm2 +; CHECK-SSE2-NEXT: pxor %xmm1, %xmm2 +; CHECK-SSE2-NEXT: pandn %xmm2, %xmm0 +; CHECK-SSE2-NEXT: pxor %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %x = load <4 x i32>, <4 x i32> *%px, align 16 + %y = load <4 x i32>, <4 x i32> *%py, align 16 + %mask = load <4 x i32>, <4 x i32> *%pmask, align 16 + %notmask = xor <4 x i32> %mask, + %n0 = xor <4 x i32> %x, ; %x + %n1 = and <4 x i32> %n0, %notmask + %r = xor <4 x i32> %n1, + ret <4 x i32> %r +} + +define <4 x i32> @out_constant_varx_42(<4 x i32> *%px, <4 x i32> *%py, <4 x i32> *%pmask) { +; CHECK-SSE-LABEL: out_constant_varx_42: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movaps (%rcx), %xmm0 +; CHECK-SSE-NEXT: movaps (%rsi), %xmm1 +; CHECK-SSE-NEXT: andps %xmm0, %xmm1 +; CHECK-SSE-NEXT: andnps {{.*}}(%rip), %xmm0 +; CHECK-SSE-NEXT: orps %xmm1, %xmm0 +; CHECK-SSE-NEXT: movaps %xmm0, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_constant_varx_42: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: movaps (%rdx), %xmm0 +; CHECK-SSE2-NEXT: movaps (%rdi), %xmm1 +; CHECK-SSE2-NEXT: andps %xmm0, %xmm1 +; CHECK-SSE2-NEXT: andnps {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: orps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %x = load <4 x i32>, <4 x i32> *%px, align 16 + %y = load <4 x 
i32>, <4 x i32> *%py, align 16 + %mask = load <4 x i32>, <4 x i32> *%pmask, align 16 + %notmask = xor <4 x i32> %mask, + %mx = and <4 x i32> %mask, %x + %my = and <4 x i32> %notmask, + %r = or <4 x i32> %mx, %my + ret <4 x i32> %r +} + +define <4 x i32> @in_constant_varx_42(<4 x i32> *%px, <4 x i32> *%py, <4 x i32> *%pmask) { +; CHECK-SSE-LABEL: in_constant_varx_42: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movaps {{.*#+}} xmm0 = [5.885454e-44,5.885454e-44,5.885454e-44,5.885454e-44] +; CHECK-SSE-NEXT: movaps (%rsi), %xmm1 +; CHECK-SSE-NEXT: xorps %xmm0, %xmm1 +; CHECK-SSE-NEXT: andps (%rcx), %xmm1 +; CHECK-SSE-NEXT: xorps %xmm0, %xmm1 +; CHECK-SSE-NEXT: movaps %xmm1, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_constant_varx_42: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: movaps {{.*#+}} xmm1 = [42,42,42,42] +; CHECK-SSE2-NEXT: movaps (%rdi), %xmm0 +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: andps (%rdx), %xmm0 +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %x = load <4 x i32>, <4 x i32> *%px, align 16 + %y = load <4 x i32>, <4 x i32> *%py, align 16 + %mask = load <4 x i32>, <4 x i32> *%pmask, align 16 + %n0 = xor <4 x i32> %x, ; %x + %n1 = and <4 x i32> %n0, %mask + %r = xor <4 x i32> %n1, + ret <4 x i32> %r +} + +; This is not a canonical form. Testing for completeness only. 
+define <4 x i32> @out_constant_varx_42_invmask(<4 x i32> *%px, <4 x i32> *%py, <4 x i32> *%pmask) { +; CHECK-SSE-LABEL: out_constant_varx_42_invmask: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movaps (%rcx), %xmm0 +; CHECK-SSE-NEXT: movaps %xmm0, %xmm1 +; CHECK-SSE-NEXT: andnps (%rsi), %xmm1 +; CHECK-SSE-NEXT: andps {{.*}}(%rip), %xmm0 +; CHECK-SSE-NEXT: orps %xmm1, %xmm0 +; CHECK-SSE-NEXT: movaps %xmm0, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_constant_varx_42_invmask: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: movaps (%rdx), %xmm0 +; CHECK-SSE2-NEXT: movaps %xmm0, %xmm1 +; CHECK-SSE2-NEXT: andnps (%rdi), %xmm1 +; CHECK-SSE2-NEXT: andps {{.*}}(%rip), %xmm0 +; CHECK-SSE2-NEXT: orps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %x = load <4 x i32>, <4 x i32> *%px, align 16 + %y = load <4 x i32>, <4 x i32> *%py, align 16 + %mask = load <4 x i32>, <4 x i32> *%pmask, align 16 + %notmask = xor <4 x i32> %mask, + %mx = and <4 x i32> %notmask, %x + %my = and <4 x i32> %mask, + %r = or <4 x i32> %mx, %my + ret <4 x i32> %r +} + +; This is not a canonical form. Testing for completeness only. 
+define <4 x i32> @in_constant_varx_42_invmask(<4 x i32> *%px, <4 x i32> *%py, <4 x i32> *%pmask) { +; CHECK-SSE-LABEL: in_constant_varx_42_invmask: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movaps (%rcx), %xmm0 +; CHECK-SSE-NEXT: movaps {{.*#+}} xmm1 = [5.885454e-44,5.885454e-44,5.885454e-44,5.885454e-44] +; CHECK-SSE-NEXT: movaps (%rsi), %xmm2 +; CHECK-SSE-NEXT: xorps %xmm1, %xmm2 +; CHECK-SSE-NEXT: andnps %xmm2, %xmm0 +; CHECK-SSE-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE-NEXT: movaps %xmm0, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_constant_varx_42_invmask: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: movaps (%rdx), %xmm0 +; CHECK-SSE2-NEXT: movaps {{.*#+}} xmm1 = [42,42,42,42] +; CHECK-SSE2-NEXT: movaps (%rdi), %xmm2 +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm2 +; CHECK-SSE2-NEXT: andnps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %x = load <4 x i32>, <4 x i32> *%px, align 16 + %y = load <4 x i32>, <4 x i32> *%py, align 16 + %mask = load <4 x i32>, <4 x i32> *%pmask, align 16 + %notmask = xor <4 x i32> %mask, + %n0 = xor <4 x i32> %x, ; %x + %n1 = and <4 x i32> %n0, %notmask + %r = xor <4 x i32> %n1, + ret <4 x i32> %r +} + +define <4 x i32> @out_constant_mone_vary(<4 x i32> *%px, <4 x i32> *%py, <4 x i32> *%pmask) { +; CHECK-SSE-LABEL: out_constant_mone_vary: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movaps (%rcx), %xmm0 +; CHECK-SSE-NEXT: movaps %xmm0, %xmm1 +; CHECK-SSE-NEXT: andnps (%rdx), %xmm1 +; CHECK-SSE-NEXT: orps %xmm0, %xmm1 +; CHECK-SSE-NEXT: movaps %xmm1, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_constant_mone_vary: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: movaps (%rdx), %xmm1 +; CHECK-SSE2-NEXT: movaps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: andnps (%rsi), %xmm0 +; CHECK-SSE2-NEXT: orps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %x = load <4 x i32>, <4 x i32> *%px, align 16 + %y = load <4 x i32>, <4 x i32> *%py, align 16 + %mask 
= load <4 x i32>, <4 x i32> *%pmask, align 16 + %notmask = xor <4 x i32> %mask, + %mx = and <4 x i32> %mask, + %my = and <4 x i32> %notmask, %y + %r = or <4 x i32> %mx, %my + ret <4 x i32> %r +} + +define <4 x i32> @in_constant_mone_vary(<4 x i32> *%px, <4 x i32> *%py, <4 x i32> *%pmask) { +; CHECK-SSE-LABEL: in_constant_mone_vary: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movaps (%rdx), %xmm0 +; CHECK-SSE-NEXT: movaps %xmm0, %xmm1 +; CHECK-SSE-NEXT: andnps (%rcx), %xmm1 +; CHECK-SSE-NEXT: xorps %xmm0, %xmm1 +; CHECK-SSE-NEXT: movaps %xmm1, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_constant_mone_vary: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: movaps (%rsi), %xmm1 +; CHECK-SSE2-NEXT: movaps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: andnps (%rdx), %xmm0 +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %x = load <4 x i32>, <4 x i32> *%px, align 16 + %y = load <4 x i32>, <4 x i32> *%py, align 16 + %mask = load <4 x i32>, <4 x i32> *%pmask, align 16 + %n0 = xor <4 x i32> , %y ; %x + %n1 = and <4 x i32> %n0, %mask + %r = xor <4 x i32> %n1, %y + ret <4 x i32> %r +} + +; This is not a canonical form. Testing for completeness only. 
+define <4 x i32> @out_constant_mone_vary_invmask(<4 x i32> *%px, <4 x i32> *%py, <4 x i32> *%pmask) { +; CHECK-SSE-LABEL: out_constant_mone_vary_invmask: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movaps (%rcx), %xmm0 +; CHECK-SSE-NEXT: movaps {{.*#+}} xmm1 = [nan,nan,nan,nan] +; CHECK-SSE-NEXT: xorps %xmm0, %xmm1 +; CHECK-SSE-NEXT: andps (%rdx), %xmm0 +; CHECK-SSE-NEXT: orps %xmm1, %xmm0 +; CHECK-SSE-NEXT: movaps %xmm0, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_constant_mone_vary_invmask: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: movdqa (%rdx), %xmm0 +; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm1 +; CHECK-SSE2-NEXT: pxor %xmm0, %xmm1 +; CHECK-SSE2-NEXT: pand (%rsi), %xmm0 +; CHECK-SSE2-NEXT: por %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %x = load <4 x i32>, <4 x i32> *%px, align 16 + %y = load <4 x i32>, <4 x i32> *%py, align 16 + %mask = load <4 x i32>, <4 x i32> *%pmask, align 16 + %notmask = xor <4 x i32> %mask, + %mx = and <4 x i32> %notmask, + %my = and <4 x i32> %mask, %y + %r = or <4 x i32> %mx, %my + ret <4 x i32> %r +} + +; This is not a canonical form. Testing for completeness only. 
+define <4 x i32> @in_constant_mone_vary_invmask(<4 x i32> *%px, <4 x i32> *%py, <4 x i32> *%pmask) { +; CHECK-SSE-LABEL: in_constant_mone_vary_invmask: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movaps (%rdx), %xmm0 +; CHECK-SSE-NEXT: movaps (%rcx), %xmm1 +; CHECK-SSE-NEXT: xorps {{.*}}(%rip), %xmm1 +; CHECK-SSE-NEXT: movaps %xmm0, %xmm2 +; CHECK-SSE-NEXT: andnps %xmm1, %xmm2 +; CHECK-SSE-NEXT: xorps %xmm0, %xmm2 +; CHECK-SSE-NEXT: movaps %xmm2, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_constant_mone_vary_invmask: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: movdqa (%rsi), %xmm1 +; CHECK-SSE2-NEXT: pcmpeqd %xmm2, %xmm2 +; CHECK-SSE2-NEXT: pxor (%rdx), %xmm2 +; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm0 +; CHECK-SSE2-NEXT: pandn %xmm2, %xmm0 +; CHECK-SSE2-NEXT: pxor %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %x = load <4 x i32>, <4 x i32> *%px, align 16 + %y = load <4 x i32>, <4 x i32> *%py, align 16 + %mask = load <4 x i32>, <4 x i32> *%pmask, align 16 + %notmask = xor <4 x i32> %mask, + %n0 = xor <4 x i32> , %y ; %x + %n1 = and <4 x i32> %n0, %notmask + %r = xor <4 x i32> %n1, %y + ret <4 x i32> %r +} + +define <4 x i32> @out_constant_42_vary(<4 x i32> *%px, <4 x i32> *%py, <4 x i32> *%pmask) { +; CHECK-SSE-LABEL: out_constant_42_vary: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movaps (%rcx), %xmm0 +; CHECK-SSE-NEXT: movaps {{.*#+}} xmm1 = [5.885454e-44,5.885454e-44,5.885454e-44,5.885454e-44] +; CHECK-SSE-NEXT: andps %xmm0, %xmm1 +; CHECK-SSE-NEXT: andnps (%rdx), %xmm0 +; CHECK-SSE-NEXT: orps %xmm1, %xmm0 +; CHECK-SSE-NEXT: movaps %xmm0, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_constant_42_vary: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: movaps (%rdx), %xmm0 +; CHECK-SSE2-NEXT: movaps {{.*#+}} xmm1 = [42,42,42,42] +; CHECK-SSE2-NEXT: andps %xmm0, %xmm1 +; CHECK-SSE2-NEXT: andnps (%rsi), %xmm0 +; CHECK-SSE2-NEXT: orps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %x = load <4 x 
i32>, <4 x i32> *%px, align 16 + %y = load <4 x i32>, <4 x i32> *%py, align 16 + %mask = load <4 x i32>, <4 x i32> *%pmask, align 16 + %notmask = xor <4 x i32> %mask, + %mx = and <4 x i32> %mask, + %my = and <4 x i32> %notmask, %y + %r = or <4 x i32> %mx, %my + ret <4 x i32> %r +} + +define <4 x i32> @in_constant_42_vary(<4 x i32> *%px, <4 x i32> *%py, <4 x i32> *%pmask) { +; CHECK-SSE-LABEL: in_constant_42_vary: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movaps (%rdx), %xmm0 +; CHECK-SSE-NEXT: movaps {{.*#+}} xmm1 = [5.885454e-44,5.885454e-44,5.885454e-44,5.885454e-44] +; CHECK-SSE-NEXT: xorps %xmm0, %xmm1 +; CHECK-SSE-NEXT: andps (%rcx), %xmm1 +; CHECK-SSE-NEXT: xorps %xmm0, %xmm1 +; CHECK-SSE-NEXT: movaps %xmm1, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_constant_42_vary: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: movaps (%rsi), %xmm1 +; CHECK-SSE2-NEXT: movaps {{.*#+}} xmm0 = [42,42,42,42] +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: andps (%rdx), %xmm0 +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %x = load <4 x i32>, <4 x i32> *%px, align 16 + %y = load <4 x i32>, <4 x i32> *%py, align 16 + %mask = load <4 x i32>, <4 x i32> *%pmask, align 16 + %n0 = xor <4 x i32> , %y ; %x + %n1 = and <4 x i32> %n0, %mask + %r = xor <4 x i32> %n1, %y + ret <4 x i32> %r +} + +; This is not a canonical form. Testing for completeness only. 
+define <4 x i32> @out_constant_42_vary_invmask(<4 x i32> *%px, <4 x i32> *%py, <4 x i32> *%pmask) { +; CHECK-SSE-LABEL: out_constant_42_vary_invmask: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movaps (%rcx), %xmm0 +; CHECK-SSE-NEXT: movaps %xmm0, %xmm1 +; CHECK-SSE-NEXT: andnps {{.*}}(%rip), %xmm1 +; CHECK-SSE-NEXT: andps (%rdx), %xmm0 +; CHECK-SSE-NEXT: orps %xmm1, %xmm0 +; CHECK-SSE-NEXT: movaps %xmm0, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_constant_42_vary_invmask: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: movaps (%rdx), %xmm0 +; CHECK-SSE2-NEXT: movaps %xmm0, %xmm1 +; CHECK-SSE2-NEXT: andnps {{.*}}(%rip), %xmm1 +; CHECK-SSE2-NEXT: andps (%rsi), %xmm0 +; CHECK-SSE2-NEXT: orps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %x = load <4 x i32>, <4 x i32> *%px, align 16 + %y = load <4 x i32>, <4 x i32> *%py, align 16 + %mask = load <4 x i32>, <4 x i32> *%pmask, align 16 + %notmask = xor <4 x i32> %mask, + %mx = and <4 x i32> %notmask, + %my = and <4 x i32> %mask, %y + %r = or <4 x i32> %mx, %my + ret <4 x i32> %r +} + +; This is not a canonical form. Testing for completeness only. 
+define <4 x i32> @in_constant_42_vary_invmask(<4 x i32> *%px, <4 x i32> *%py, <4 x i32> *%pmask) { +; CHECK-SSE-LABEL: in_constant_42_vary_invmask: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movaps (%rdx), %xmm0 +; CHECK-SSE-NEXT: movaps (%rcx), %xmm1 +; CHECK-SSE-NEXT: movaps {{.*#+}} xmm2 = [5.885454e-44,5.885454e-44,5.885454e-44,5.885454e-44] +; CHECK-SSE-NEXT: xorps %xmm0, %xmm2 +; CHECK-SSE-NEXT: andnps %xmm2, %xmm1 +; CHECK-SSE-NEXT: xorps %xmm0, %xmm1 +; CHECK-SSE-NEXT: movaps %xmm1, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_constant_42_vary_invmask: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: movaps (%rsi), %xmm1 +; CHECK-SSE2-NEXT: movaps (%rdx), %xmm0 +; CHECK-SSE2-NEXT: movaps {{.*#+}} xmm2 = [42,42,42,42] +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm2 +; CHECK-SSE2-NEXT: andnps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %x = load <4 x i32>, <4 x i32> *%px, align 16 + %y = load <4 x i32>, <4 x i32> *%py, align 16 + %mask = load <4 x i32>, <4 x i32> *%pmask, align 16 + %notmask = xor <4 x i32> %mask, + %n0 = xor <4 x i32> , %y ; %x + %n1 = and <4 x i32> %n0, %notmask + %r = xor <4 x i32> %n1, %y + ret <4 x i32> %r +} Index: test/CodeGen/X86/unfold-masked-merge-vector-variablemask.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/unfold-masked-merge-vector-variablemask.ll @@ -0,0 +1,2364 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=-sse,-sse2 < %s | FileCheck %s --check-prefix=CHECK-BASELINE +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,-sse2 < %s | FileCheck %s --check-prefix=CHECK-SSE +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,+sse2 < %s | FileCheck %s --check-prefix=CHECK-SSE2 + +; https://bugs.llvm.org/show_bug.cgi?id=37104 + +; All the advanced stuff (negative tests, commutativity) is handled in the +; 
scalar version of the test only. + +; ============================================================================ ; +; 8-bit vector width +; ============================================================================ ; + +define <1 x i8> @out_v1i8(<1 x i8> %x, <1 x i8> %y, <1 x i8> %mask) nounwind { +; CHECK-BASELINE-LABEL: out_v1i8: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: andl %edx, %edi +; CHECK-BASELINE-NEXT: notb %dl +; CHECK-BASELINE-NEXT: andb %sil, %dl +; CHECK-BASELINE-NEXT: orb %dil, %dl +; CHECK-BASELINE-NEXT: movl %edx, %eax +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: out_v1i8: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: andl %edx, %edi +; CHECK-SSE-NEXT: notb %dl +; CHECK-SSE-NEXT: andb %sil, %dl +; CHECK-SSE-NEXT: orb %dil, %dl +; CHECK-SSE-NEXT: movl %edx, %eax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_v1i8: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: andl %edx, %edi +; CHECK-SSE2-NEXT: notb %dl +; CHECK-SSE2-NEXT: andb %sil, %dl +; CHECK-SSE2-NEXT: orb %dil, %dl +; CHECK-SSE2-NEXT: movl %edx, %eax +; CHECK-SSE2-NEXT: retq + %mx = and <1 x i8> %x, %mask + %notmask = xor <1 x i8> %mask, + %my = and <1 x i8> %y, %notmask + %r = or <1 x i8> %mx, %my + ret <1 x i8> %r +} + +; ============================================================================ ; +; 16-bit vector width +; ============================================================================ ; + +define <2 x i8> @out_v2i8(<2 x i8> %x, <2 x i8> %y, <2 x i8> %mask) nounwind { +; CHECK-BASELINE-LABEL: out_v2i8: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: andl %r8d, %edi +; CHECK-BASELINE-NEXT: andl %r9d, %esi +; CHECK-BASELINE-NEXT: notb %r8b +; CHECK-BASELINE-NEXT: notb %r9b +; CHECK-BASELINE-NEXT: andb %cl, %r9b +; CHECK-BASELINE-NEXT: andb %dl, %r8b +; CHECK-BASELINE-NEXT: orb %dil, %r8b +; CHECK-BASELINE-NEXT: orb %sil, %r9b +; CHECK-BASELINE-NEXT: movl %r8d, %eax +; CHECK-BASELINE-NEXT: movl %r9d, %edx +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: 
out_v2i8: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: andl %r8d, %edi +; CHECK-SSE-NEXT: andl %r9d, %esi +; CHECK-SSE-NEXT: notb %r8b +; CHECK-SSE-NEXT: notb %r9b +; CHECK-SSE-NEXT: andb %cl, %r9b +; CHECK-SSE-NEXT: andb %dl, %r8b +; CHECK-SSE-NEXT: orb %dil, %r8b +; CHECK-SSE-NEXT: orb %sil, %r9b +; CHECK-SSE-NEXT: movl %r8d, %eax +; CHECK-SSE-NEXT: movl %r9d, %edx +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_v2i8: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps {{.*}}(%rip), %xmm2 +; CHECK-SSE2-NEXT: andps %xmm1, %xmm2 +; CHECK-SSE2-NEXT: orps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: retq + %mx = and <2 x i8> %x, %mask + %notmask = xor <2 x i8> %mask, + %my = and <2 x i8> %y, %notmask + %r = or <2 x i8> %mx, %my + ret <2 x i8> %r +} + +define <1 x i16> @out_v1i16(<1 x i16> %x, <1 x i16> %y, <1 x i16> %mask) nounwind { +; CHECK-BASELINE-LABEL: out_v1i16: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: andl %edx, %edi +; CHECK-BASELINE-NEXT: notl %edx +; CHECK-BASELINE-NEXT: andl %esi, %edx +; CHECK-BASELINE-NEXT: orl %edi, %edx +; CHECK-BASELINE-NEXT: movl %edx, %eax +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: out_v1i16: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: andl %edx, %edi +; CHECK-SSE-NEXT: notl %edx +; CHECK-SSE-NEXT: andl %esi, %edx +; CHECK-SSE-NEXT: orl %edi, %edx +; CHECK-SSE-NEXT: movl %edx, %eax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_v1i16: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: andl %edx, %edi +; CHECK-SSE2-NEXT: notl %edx +; CHECK-SSE2-NEXT: andl %esi, %edx +; CHECK-SSE2-NEXT: orl %edi, %edx +; CHECK-SSE2-NEXT: movl %edx, %eax +; CHECK-SSE2-NEXT: retq + %mx = and <1 x i16> %x, %mask + %notmask = xor <1 x i16> %mask, + %my = and <1 x i16> %y, %notmask + %r = or <1 x i16> %mx, %my + ret <1 x i16> %r +} + +; ============================================================================ ; +; 32-bit vector width +; 
============================================================================ ; + +define <4 x i8> @out_v4i8(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwind { +; CHECK-BASELINE-LABEL: out_v4i8: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: pushq %rbx +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r10b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r11b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %bl +; CHECK-BASELINE-NEXT: andb %bl, %r8b +; CHECK-BASELINE-NEXT: andb %al, %cl +; CHECK-BASELINE-NEXT: andb %r11b, %dl +; CHECK-BASELINE-NEXT: andb %r10b, %sil +; CHECK-BASELINE-NEXT: notb %r11b +; CHECK-BASELINE-NEXT: notb %al +; CHECK-BASELINE-NEXT: notb %bl +; CHECK-BASELINE-NEXT: notb %r10b +; CHECK-BASELINE-NEXT: andb %r9b, %r10b +; CHECK-BASELINE-NEXT: orb %sil, %r10b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %bl +; CHECK-BASELINE-NEXT: orb %r8b, %bl +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: orb %cl, %al +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r11b +; CHECK-BASELINE-NEXT: orb %dl, %r11b +; CHECK-BASELINE-NEXT: movb %bl, 3(%rdi) +; CHECK-BASELINE-NEXT: movb %al, 2(%rdi) +; CHECK-BASELINE-NEXT: movb %r11b, 1(%rdi) +; CHECK-BASELINE-NEXT: movb %r10b, (%rdi) +; CHECK-BASELINE-NEXT: movq %rdi, %rax +; CHECK-BASELINE-NEXT: popq %rbx +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: out_v4i8: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: pushq %rbx +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r10b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r11b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %bl +; CHECK-SSE-NEXT: andb %bl, %r8b +; CHECK-SSE-NEXT: andb %al, %cl +; CHECK-SSE-NEXT: andb %r11b, %dl +; CHECK-SSE-NEXT: andb %r10b, %sil +; CHECK-SSE-NEXT: notb %r11b +; CHECK-SSE-NEXT: notb %al +; CHECK-SSE-NEXT: notb %bl +; CHECK-SSE-NEXT: notb %r10b +; CHECK-SSE-NEXT: andb %r9b, %r10b +; CHECK-SSE-NEXT: orb %sil, %r10b +; 
CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %bl +; CHECK-SSE-NEXT: orb %r8b, %bl +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: orb %cl, %al +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r11b +; CHECK-SSE-NEXT: orb %dl, %r11b +; CHECK-SSE-NEXT: movb %bl, 3(%rdi) +; CHECK-SSE-NEXT: movb %al, 2(%rdi) +; CHECK-SSE-NEXT: movb %r11b, 1(%rdi) +; CHECK-SSE-NEXT: movb %r10b, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: popq %rbx +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_v4i8: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps {{.*}}(%rip), %xmm2 +; CHECK-SSE2-NEXT: andps %xmm1, %xmm2 +; CHECK-SSE2-NEXT: orps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: retq + %mx = and <4 x i8> %x, %mask + %notmask = xor <4 x i8> %mask, + %my = and <4 x i8> %y, %notmask + %r = or <4 x i8> %mx, %my + ret <4 x i8> %r +} + +define <4 x i8> @out_v4i8_undef(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwind { +; CHECK-BASELINE-LABEL: out_v4i8_undef: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r10b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r11b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: andb %al, %r8b +; CHECK-BASELINE-NEXT: andb %r11b, %dl +; CHECK-BASELINE-NEXT: andb %r10b, %sil +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-BASELINE-NEXT: notb %r11b +; CHECK-BASELINE-NEXT: notb %al +; CHECK-BASELINE-NEXT: notb %r10b +; CHECK-BASELINE-NEXT: andb %r9b, %r10b +; CHECK-BASELINE-NEXT: orb %sil, %r10b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: orb %r8b, %al +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r11b +; CHECK-BASELINE-NEXT: orb %dl, %r11b +; CHECK-BASELINE-NEXT: movb %cl, 2(%rdi) +; CHECK-BASELINE-NEXT: movb %al, 3(%rdi) +; CHECK-BASELINE-NEXT: movb %r11b, 1(%rdi) +; CHECK-BASELINE-NEXT: movb %r10b, (%rdi) +; CHECK-BASELINE-NEXT: movq %rdi, %rax +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: out_v4i8_undef: +; 
CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r10b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r11b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: andb %al, %r8b +; CHECK-SSE-NEXT: andb %r11b, %dl +; CHECK-SSE-NEXT: andb %r10b, %sil +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-SSE-NEXT: notb %r11b +; CHECK-SSE-NEXT: notb %al +; CHECK-SSE-NEXT: notb %r10b +; CHECK-SSE-NEXT: andb %r9b, %r10b +; CHECK-SSE-NEXT: orb %sil, %r10b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: orb %r8b, %al +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r11b +; CHECK-SSE-NEXT: orb %dl, %r11b +; CHECK-SSE-NEXT: movb %cl, 2(%rdi) +; CHECK-SSE-NEXT: movb %al, 3(%rdi) +; CHECK-SSE-NEXT: movb %r11b, 1(%rdi) +; CHECK-SSE-NEXT: movb %r10b, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_v4i8_undef: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps {{.*}}(%rip), %xmm2 +; CHECK-SSE2-NEXT: andps %xmm1, %xmm2 +; CHECK-SSE2-NEXT: orps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: retq + %mx = and <4 x i8> %x, %mask + %notmask = xor <4 x i8> %mask, + %my = and <4 x i8> %y, %notmask + %r = or <4 x i8> %mx, %my + ret <4 x i8> %r +} + +define <2 x i16> @out_v2i16(<2 x i16> %x, <2 x i16> %y, <2 x i16> %mask) nounwind { +; CHECK-BASELINE-LABEL: out_v2i16: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: andl %r9d, %esi +; CHECK-BASELINE-NEXT: andl %r8d, %edi +; CHECK-BASELINE-NEXT: notl %r8d +; CHECK-BASELINE-NEXT: notl %r9d +; CHECK-BASELINE-NEXT: andl %ecx, %r9d +; CHECK-BASELINE-NEXT: orl %esi, %r9d +; CHECK-BASELINE-NEXT: andl %edx, %r8d +; CHECK-BASELINE-NEXT: orl %edi, %r8d +; CHECK-BASELINE-NEXT: movl %r8d, %eax +; CHECK-BASELINE-NEXT: movl %r9d, %edx +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: out_v2i16: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: andl %r9d, %esi +; CHECK-SSE-NEXT: andl %r8d, %edi +; CHECK-SSE-NEXT: notl %r8d +; CHECK-SSE-NEXT: notl %r9d +; 
CHECK-SSE-NEXT: andl %ecx, %r9d +; CHECK-SSE-NEXT: orl %esi, %r9d +; CHECK-SSE-NEXT: andl %edx, %r8d +; CHECK-SSE-NEXT: orl %edi, %r8d +; CHECK-SSE-NEXT: movl %r8d, %eax +; CHECK-SSE-NEXT: movl %r9d, %edx +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_v2i16: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps {{.*}}(%rip), %xmm2 +; CHECK-SSE2-NEXT: andps %xmm1, %xmm2 +; CHECK-SSE2-NEXT: orps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: retq + %mx = and <2 x i16> %x, %mask + %notmask = xor <2 x i16> %mask, + %my = and <2 x i16> %y, %notmask + %r = or <2 x i16> %mx, %my + ret <2 x i16> %r +} + +define <1 x i32> @out_v1i32(<1 x i32> %x, <1 x i32> %y, <1 x i32> %mask) nounwind { +; CHECK-BASELINE-LABEL: out_v1i32: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: andl %edx, %edi +; CHECK-BASELINE-NEXT: notl %edx +; CHECK-BASELINE-NEXT: andl %esi, %edx +; CHECK-BASELINE-NEXT: orl %edi, %edx +; CHECK-BASELINE-NEXT: movl %edx, %eax +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: out_v1i32: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: andl %edx, %edi +; CHECK-SSE-NEXT: notl %edx +; CHECK-SSE-NEXT: andl %esi, %edx +; CHECK-SSE-NEXT: orl %edi, %edx +; CHECK-SSE-NEXT: movl %edx, %eax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_v1i32: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: andl %edx, %edi +; CHECK-SSE2-NEXT: notl %edx +; CHECK-SSE2-NEXT: andl %esi, %edx +; CHECK-SSE2-NEXT: orl %edi, %edx +; CHECK-SSE2-NEXT: movl %edx, %eax +; CHECK-SSE2-NEXT: retq + %mx = and <1 x i32> %x, %mask + %notmask = xor <1 x i32> %mask, + %my = and <1 x i32> %y, %notmask + %r = or <1 x i32> %mx, %my + ret <1 x i32> %r +} + +; ============================================================================ ; +; 64-bit vector width +; ============================================================================ ; + +define <8 x i8> @out_v8i8(<8 x i8> %x, <8 x i8> %y, <8 x i8> %mask) nounwind { +; CHECK-BASELINE-LABEL: out_v8i8: +; CHECK-BASELINE: # %bb.0: +; 
CHECK-BASELINE-NEXT: pushq %rbp +; CHECK-BASELINE-NEXT: pushq %r15 +; CHECK-BASELINE-NEXT: pushq %r14 +; CHECK-BASELINE-NEXT: pushq %r13 +; CHECK-BASELINE-NEXT: pushq %r12 +; CHECK-BASELINE-NEXT: pushq %rbx +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %bpl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r15b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r12b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r10b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r11b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r14b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %bl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: andb %al, %r9b +; CHECK-BASELINE-NEXT: andb %bl, %r8b +; CHECK-BASELINE-NEXT: andb %r14b, %cl +; CHECK-BASELINE-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-BASELINE-NEXT: andb %r11b, %dl +; CHECK-BASELINE-NEXT: movl %edx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-BASELINE-NEXT: andb %r10b, %sil +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r13b +; CHECK-BASELINE-NEXT: andb %r12b, %r13b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %cl +; CHECK-BASELINE-NEXT: andb %r15b, %cl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %dl +; CHECK-BASELINE-NEXT: andb %bpl, %dl +; CHECK-BASELINE-NEXT: notb %r10b +; CHECK-BASELINE-NEXT: notb %r11b +; CHECK-BASELINE-NEXT: notb %r14b +; CHECK-BASELINE-NEXT: notb %bl +; CHECK-BASELINE-NEXT: notb %al +; CHECK-BASELINE-NEXT: notb %bpl +; CHECK-BASELINE-NEXT: notb %r15b +; CHECK-BASELINE-NEXT: notb %r12b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r12b +; CHECK-BASELINE-NEXT: orb %r13b, %r12b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r15b +; CHECK-BASELINE-NEXT: orb %cl, %r15b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %bpl +; CHECK-BASELINE-NEXT: orb %dl, %bpl +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: orb %r9b, %al +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %bl +; CHECK-BASELINE-NEXT: orb %r8b, %bl +; 
CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r14b +; CHECK-BASELINE-NEXT: orb {{[-0-9]+}}(%r{{[sb]}}p), %r14b # 1-byte Folded Reload +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r11b +; CHECK-BASELINE-NEXT: orb {{[-0-9]+}}(%r{{[sb]}}p), %r11b # 1-byte Folded Reload +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r10b +; CHECK-BASELINE-NEXT: orb %sil, %r10b +; CHECK-BASELINE-NEXT: movb %r12b, 7(%rdi) +; CHECK-BASELINE-NEXT: movb %r15b, 6(%rdi) +; CHECK-BASELINE-NEXT: movb %bpl, 5(%rdi) +; CHECK-BASELINE-NEXT: movb %al, 4(%rdi) +; CHECK-BASELINE-NEXT: movb %bl, 3(%rdi) +; CHECK-BASELINE-NEXT: movb %r14b, 2(%rdi) +; CHECK-BASELINE-NEXT: movb %r11b, 1(%rdi) +; CHECK-BASELINE-NEXT: movb %r10b, (%rdi) +; CHECK-BASELINE-NEXT: movq %rdi, %rax +; CHECK-BASELINE-NEXT: popq %rbx +; CHECK-BASELINE-NEXT: popq %r12 +; CHECK-BASELINE-NEXT: popq %r13 +; CHECK-BASELINE-NEXT: popq %r14 +; CHECK-BASELINE-NEXT: popq %r15 +; CHECK-BASELINE-NEXT: popq %rbp +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: out_v8i8: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: pushq %rbp +; CHECK-SSE-NEXT: pushq %r15 +; CHECK-SSE-NEXT: pushq %r14 +; CHECK-SSE-NEXT: pushq %r13 +; CHECK-SSE-NEXT: pushq %r12 +; CHECK-SSE-NEXT: pushq %rbx +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %bpl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r15b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r12b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r10b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r11b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r14b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %bl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: andb %al, %r9b +; CHECK-SSE-NEXT: andb %bl, %r8b +; CHECK-SSE-NEXT: andb %r14b, %cl +; CHECK-SSE-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: andb %r11b, %dl +; CHECK-SSE-NEXT: movl %edx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: andb %r10b, %sil +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r13b +; CHECK-SSE-NEXT: andb %r12b, %r13b +; 
CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %cl +; CHECK-SSE-NEXT: andb %r15b, %cl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %dl +; CHECK-SSE-NEXT: andb %bpl, %dl +; CHECK-SSE-NEXT: notb %r10b +; CHECK-SSE-NEXT: notb %r11b +; CHECK-SSE-NEXT: notb %r14b +; CHECK-SSE-NEXT: notb %bl +; CHECK-SSE-NEXT: notb %al +; CHECK-SSE-NEXT: notb %bpl +; CHECK-SSE-NEXT: notb %r15b +; CHECK-SSE-NEXT: notb %r12b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r12b +; CHECK-SSE-NEXT: orb %r13b, %r12b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r15b +; CHECK-SSE-NEXT: orb %cl, %r15b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %bpl +; CHECK-SSE-NEXT: orb %dl, %bpl +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: orb %r9b, %al +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %bl +; CHECK-SSE-NEXT: orb %r8b, %bl +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r14b +; CHECK-SSE-NEXT: orb {{[-0-9]+}}(%r{{[sb]}}p), %r14b # 1-byte Folded Reload +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r11b +; CHECK-SSE-NEXT: orb {{[-0-9]+}}(%r{{[sb]}}p), %r11b # 1-byte Folded Reload +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r10b +; CHECK-SSE-NEXT: orb %sil, %r10b +; CHECK-SSE-NEXT: movb %r12b, 7(%rdi) +; CHECK-SSE-NEXT: movb %r15b, 6(%rdi) +; CHECK-SSE-NEXT: movb %bpl, 5(%rdi) +; CHECK-SSE-NEXT: movb %al, 4(%rdi) +; CHECK-SSE-NEXT: movb %bl, 3(%rdi) +; CHECK-SSE-NEXT: movb %r14b, 2(%rdi) +; CHECK-SSE-NEXT: movb %r11b, 1(%rdi) +; CHECK-SSE-NEXT: movb %r10b, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: popq %rbx +; CHECK-SSE-NEXT: popq %r12 +; CHECK-SSE-NEXT: popq %r13 +; CHECK-SSE-NEXT: popq %r14 +; CHECK-SSE-NEXT: popq %r15 +; CHECK-SSE-NEXT: popq %rbp +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_v8i8: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps {{.*}}(%rip), %xmm2 +; CHECK-SSE2-NEXT: andps %xmm1, %xmm2 +; CHECK-SSE2-NEXT: orps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: retq + %mx = and <8 x i8> %x, %mask + %notmask = xor <8 x i8> %mask, + %my = and <8 x 
i8> %y, %notmask + %r = or <8 x i8> %mx, %my + ret <8 x i8> %r +} + +define <4 x i16> @out_v4i16(<4 x i16> %x, <4 x i16> %y, <4 x i16> %mask) nounwind { +; CHECK-BASELINE-LABEL: out_v4i16: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: pushq %rbx +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %r10d +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %r11d +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %ebx +; CHECK-BASELINE-NEXT: andl %ebx, %esi +; CHECK-BASELINE-NEXT: andl %eax, %r8d +; CHECK-BASELINE-NEXT: andl %r11d, %ecx +; CHECK-BASELINE-NEXT: andl %r10d, %edx +; CHECK-BASELINE-NEXT: notl %r10d +; CHECK-BASELINE-NEXT: notl %r11d +; CHECK-BASELINE-NEXT: notl %eax +; CHECK-BASELINE-NEXT: notl %ebx +; CHECK-BASELINE-NEXT: andl %r9d, %ebx +; CHECK-BASELINE-NEXT: orl %esi, %ebx +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %ax +; CHECK-BASELINE-NEXT: orl %r8d, %eax +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %r11w +; CHECK-BASELINE-NEXT: orl %ecx, %r11d +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %r10w +; CHECK-BASELINE-NEXT: orl %edx, %r10d +; CHECK-BASELINE-NEXT: movw %bx, (%rdi) +; CHECK-BASELINE-NEXT: movw %ax, 6(%rdi) +; CHECK-BASELINE-NEXT: movw %r11w, 4(%rdi) +; CHECK-BASELINE-NEXT: movw %r10w, 2(%rdi) +; CHECK-BASELINE-NEXT: movq %rdi, %rax +; CHECK-BASELINE-NEXT: popq %rbx +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: out_v4i16: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: pushq %rbx +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %r10d +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %r11d +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %ebx +; CHECK-SSE-NEXT: andl %ebx, %esi +; CHECK-SSE-NEXT: andl %eax, %r8d +; CHECK-SSE-NEXT: andl %r11d, %ecx +; CHECK-SSE-NEXT: andl %r10d, %edx +; CHECK-SSE-NEXT: notl %r10d +; CHECK-SSE-NEXT: notl %r11d +; CHECK-SSE-NEXT: notl %eax +; CHECK-SSE-NEXT: notl %ebx +; CHECK-SSE-NEXT: andl %r9d, %ebx +; CHECK-SSE-NEXT: orl %esi, %ebx +; 
CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %ax +; CHECK-SSE-NEXT: orl %r8d, %eax +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %r11w +; CHECK-SSE-NEXT: orl %ecx, %r11d +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %r10w +; CHECK-SSE-NEXT: orl %edx, %r10d +; CHECK-SSE-NEXT: movw %bx, (%rdi) +; CHECK-SSE-NEXT: movw %ax, 6(%rdi) +; CHECK-SSE-NEXT: movw %r11w, 4(%rdi) +; CHECK-SSE-NEXT: movw %r10w, 2(%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: popq %rbx +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_v4i16: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps {{.*}}(%rip), %xmm2 +; CHECK-SSE2-NEXT: andps %xmm1, %xmm2 +; CHECK-SSE2-NEXT: orps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: retq + %mx = and <4 x i16> %x, %mask + %notmask = xor <4 x i16> %mask, + %my = and <4 x i16> %y, %notmask + %r = or <4 x i16> %mx, %my + ret <4 x i16> %r +} + +define <4 x i16> @out_v4i16_undef(<4 x i16> %x, <4 x i16> %y, <4 x i16> %mask) nounwind { +; CHECK-BASELINE-LABEL: out_v4i16_undef: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %r10d +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %r11d +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-BASELINE-NEXT: andl %eax, %esi +; CHECK-BASELINE-NEXT: andl %r11d, %r8d +; CHECK-BASELINE-NEXT: andl %r10d, %edx +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %cx +; CHECK-BASELINE-NEXT: notl %r10d +; CHECK-BASELINE-NEXT: notl %r11d +; CHECK-BASELINE-NEXT: notl %eax +; CHECK-BASELINE-NEXT: andl %r9d, %eax +; CHECK-BASELINE-NEXT: orl %esi, %eax +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %r11w +; CHECK-BASELINE-NEXT: orl %r8d, %r11d +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %r10w +; CHECK-BASELINE-NEXT: orl %edx, %r10d +; CHECK-BASELINE-NEXT: movw %cx, 4(%rdi) +; CHECK-BASELINE-NEXT: movw %ax, (%rdi) +; CHECK-BASELINE-NEXT: movw %r11w, 6(%rdi) +; CHECK-BASELINE-NEXT: movw %r10w, 2(%rdi) +; CHECK-BASELINE-NEXT: movq %rdi, %rax +; CHECK-BASELINE-NEXT: retq +; +; 
CHECK-SSE-LABEL: out_v4i16_undef: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %r10d +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %r11d +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: andl %eax, %esi +; CHECK-SSE-NEXT: andl %r11d, %r8d +; CHECK-SSE-NEXT: andl %r10d, %edx +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %cx +; CHECK-SSE-NEXT: notl %r10d +; CHECK-SSE-NEXT: notl %r11d +; CHECK-SSE-NEXT: notl %eax +; CHECK-SSE-NEXT: andl %r9d, %eax +; CHECK-SSE-NEXT: orl %esi, %eax +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %r11w +; CHECK-SSE-NEXT: orl %r8d, %r11d +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %r10w +; CHECK-SSE-NEXT: orl %edx, %r10d +; CHECK-SSE-NEXT: movw %cx, 4(%rdi) +; CHECK-SSE-NEXT: movw %ax, (%rdi) +; CHECK-SSE-NEXT: movw %r11w, 6(%rdi) +; CHECK-SSE-NEXT: movw %r10w, 2(%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_v4i16_undef: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps {{.*}}(%rip), %xmm2 +; CHECK-SSE2-NEXT: andps %xmm1, %xmm2 +; CHECK-SSE2-NEXT: orps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: retq + %mx = and <4 x i16> %x, %mask + %notmask = xor <4 x i16> %mask, + %my = and <4 x i16> %y, %notmask + %r = or <4 x i16> %mx, %my + ret <4 x i16> %r +} + +define <2 x i32> @out_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %mask) nounwind { +; CHECK-BASELINE-LABEL: out_v2i32: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: andl %r9d, %esi +; CHECK-BASELINE-NEXT: andl %r8d, %edi +; CHECK-BASELINE-NEXT: notl %r8d +; CHECK-BASELINE-NEXT: notl %r9d +; CHECK-BASELINE-NEXT: andl %ecx, %r9d +; CHECK-BASELINE-NEXT: orl %esi, %r9d +; CHECK-BASELINE-NEXT: andl %edx, %r8d +; CHECK-BASELINE-NEXT: orl %edi, %r8d +; CHECK-BASELINE-NEXT: movl %r8d, %eax +; CHECK-BASELINE-NEXT: movl %r9d, %edx +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: out_v2i32: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: andl %r9d, %esi +; CHECK-SSE-NEXT: andl %r8d, %edi +; 
CHECK-SSE-NEXT: notl %r8d +; CHECK-SSE-NEXT: notl %r9d +; CHECK-SSE-NEXT: andl %ecx, %r9d +; CHECK-SSE-NEXT: orl %esi, %r9d +; CHECK-SSE-NEXT: andl %edx, %r8d +; CHECK-SSE-NEXT: orl %edi, %r8d +; CHECK-SSE-NEXT: movl %r8d, %eax +; CHECK-SSE-NEXT: movl %r9d, %edx +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_v2i32: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps {{.*}}(%rip), %xmm2 +; CHECK-SSE2-NEXT: andps %xmm1, %xmm2 +; CHECK-SSE2-NEXT: orps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: retq + %mx = and <2 x i32> %x, %mask + %notmask = xor <2 x i32> %mask, + %my = and <2 x i32> %y, %notmask + %r = or <2 x i32> %mx, %my + ret <2 x i32> %r +} + +define <1 x i64> @out_v1i64(<1 x i64> %x, <1 x i64> %y, <1 x i64> %mask) nounwind { +; CHECK-BASELINE-LABEL: out_v1i64: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: andq %rdx, %rdi +; CHECK-BASELINE-NEXT: notq %rdx +; CHECK-BASELINE-NEXT: andq %rsi, %rdx +; CHECK-BASELINE-NEXT: orq %rdi, %rdx +; CHECK-BASELINE-NEXT: movq %rdx, %rax +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: out_v1i64: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: andq %rdx, %rdi +; CHECK-SSE-NEXT: notq %rdx +; CHECK-SSE-NEXT: andq %rsi, %rdx +; CHECK-SSE-NEXT: orq %rdi, %rdx +; CHECK-SSE-NEXT: movq %rdx, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_v1i64: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: andq %rdx, %rdi +; CHECK-SSE2-NEXT: notq %rdx +; CHECK-SSE2-NEXT: andq %rsi, %rdx +; CHECK-SSE2-NEXT: orq %rdi, %rdx +; CHECK-SSE2-NEXT: movq %rdx, %rax +; CHECK-SSE2-NEXT: retq + %mx = and <1 x i64> %x, %mask + %notmask = xor <1 x i64> %mask, + %my = and <1 x i64> %y, %notmask + %r = or <1 x i64> %mx, %my + ret <1 x i64> %r +} + +; ============================================================================ ; +; 128-bit vector width +; ============================================================================ ; + +define <16 x i8> @out_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %mask) nounwind { +; 
CHECK-BASELINE-LABEL: out_v16i8: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: pushq %rbp +; CHECK-BASELINE-NEXT: pushq %r15 +; CHECK-BASELINE-NEXT: pushq %r14 +; CHECK-BASELINE-NEXT: pushq %r13 +; CHECK-BASELINE-NEXT: pushq %r12 +; CHECK-BASELINE-NEXT: pushq %rbx +; CHECK-BASELINE-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-BASELINE-NEXT: movl %edx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-BASELINE-NEXT: movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r10b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r11b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %bpl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r14b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r15b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r12b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r13b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %bl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %dl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %cl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-BASELINE-NEXT: andb %al, %sil +; CHECK-BASELINE-NEXT: notb %al +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: orb %sil, %al +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-BASELINE-NEXT: andb %cl, %sil +; CHECK-BASELINE-NEXT: notb %cl +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-BASELINE-NEXT: orb %sil, %cl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-BASELINE-NEXT: andb %dl, %sil +; CHECK-BASELINE-NEXT: notb %dl +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %dl +; CHECK-BASELINE-NEXT: orb %sil, %dl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-BASELINE-NEXT: andb %bl, %sil +; CHECK-BASELINE-NEXT: notb %bl +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %bl +; CHECK-BASELINE-NEXT: orb %sil, %bl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-BASELINE-NEXT: 
andb %r13b, %sil +; CHECK-BASELINE-NEXT: notb %r13b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r13b +; CHECK-BASELINE-NEXT: orb %sil, %r13b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-BASELINE-NEXT: andb %r12b, %sil +; CHECK-BASELINE-NEXT: notb %r12b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r12b +; CHECK-BASELINE-NEXT: orb %sil, %r12b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-BASELINE-NEXT: andb %r15b, %sil +; CHECK-BASELINE-NEXT: notb %r15b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r15b +; CHECK-BASELINE-NEXT: orb %sil, %r15b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-BASELINE-NEXT: andb %r14b, %sil +; CHECK-BASELINE-NEXT: notb %r14b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r14b +; CHECK-BASELINE-NEXT: orb %sil, %r14b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-BASELINE-NEXT: andb %bpl, %sil +; CHECK-BASELINE-NEXT: notb %bpl +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %bpl +; CHECK-BASELINE-NEXT: orb %sil, %bpl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-BASELINE-NEXT: andb %r11b, %sil +; CHECK-BASELINE-NEXT: notb %r11b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r11b +; CHECK-BASELINE-NEXT: orb %sil, %r11b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-BASELINE-NEXT: andb %r10b, %sil +; CHECK-BASELINE-NEXT: notb %r10b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r10b +; CHECK-BASELINE-NEXT: orb %sil, %r10b +; CHECK-BASELINE-NEXT: movb %al, 15(%rdi) +; CHECK-BASELINE-NEXT: movb %cl, 14(%rdi) +; CHECK-BASELINE-NEXT: movb %dl, 13(%rdi) +; CHECK-BASELINE-NEXT: movb %bl, 12(%rdi) +; CHECK-BASELINE-NEXT: movb %r13b, 11(%rdi) +; CHECK-BASELINE-NEXT: movb %r12b, 10(%rdi) +; CHECK-BASELINE-NEXT: movb %r15b, 9(%rdi) +; CHECK-BASELINE-NEXT: movb %r14b, 8(%rdi) +; CHECK-BASELINE-NEXT: movb %bpl, 7(%rdi) +; CHECK-BASELINE-NEXT: movb %r11b, 6(%rdi) +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: andb %al, %r9b +; 
CHECK-BASELINE-NEXT: notb %al +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: orb %r9b, %al +; CHECK-BASELINE-NEXT: movb %r10b, 5(%rdi) +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %cl +; CHECK-BASELINE-NEXT: andb %cl, %r8b +; CHECK-BASELINE-NEXT: notb %cl +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-BASELINE-NEXT: orb %r8b, %cl +; CHECK-BASELINE-NEXT: movb %al, 4(%rdi) +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 4-byte Reload +; CHECK-BASELINE-NEXT: andb %al, %dl +; CHECK-BASELINE-NEXT: notb %al +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: orb %dl, %al +; CHECK-BASELINE-NEXT: movb %cl, 3(%rdi) +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %cl +; CHECK-BASELINE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 4-byte Reload +; CHECK-BASELINE-NEXT: andb %cl, %dl +; CHECK-BASELINE-NEXT: notb %cl +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-BASELINE-NEXT: orb %dl, %cl +; CHECK-BASELINE-NEXT: movb %al, 2(%rdi) +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 4-byte Reload +; CHECK-BASELINE-NEXT: andb %al, %dl +; CHECK-BASELINE-NEXT: notb %al +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: orb %dl, %al +; CHECK-BASELINE-NEXT: movb %cl, 1(%rdi) +; CHECK-BASELINE-NEXT: movb %al, (%rdi) +; CHECK-BASELINE-NEXT: movq %rdi, %rax +; CHECK-BASELINE-NEXT: popq %rbx +; CHECK-BASELINE-NEXT: popq %r12 +; CHECK-BASELINE-NEXT: popq %r13 +; CHECK-BASELINE-NEXT: popq %r14 +; CHECK-BASELINE-NEXT: popq %r15 +; CHECK-BASELINE-NEXT: popq %rbp +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: out_v16i8: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: pushq %rbp +; CHECK-SSE-NEXT: pushq %r15 +; CHECK-SSE-NEXT: pushq %r14 +; CHECK-SSE-NEXT: pushq %r13 +; CHECK-SSE-NEXT: pushq %r12 +; CHECK-SSE-NEXT: pushq %rbx +; CHECK-SSE-NEXT: movl %ecx, 
{{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movl %edx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r10b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r11b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %bpl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r14b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r15b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r12b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r13b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %bl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %dl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %cl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-SSE-NEXT: andb %al, %sil +; CHECK-SSE-NEXT: notb %al +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: orb %sil, %al +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-SSE-NEXT: andb %cl, %sil +; CHECK-SSE-NEXT: notb %cl +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-SSE-NEXT: orb %sil, %cl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-SSE-NEXT: andb %dl, %sil +; CHECK-SSE-NEXT: notb %dl +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %dl +; CHECK-SSE-NEXT: orb %sil, %dl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-SSE-NEXT: andb %bl, %sil +; CHECK-SSE-NEXT: notb %bl +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %bl +; CHECK-SSE-NEXT: orb %sil, %bl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-SSE-NEXT: andb %r13b, %sil +; CHECK-SSE-NEXT: notb %r13b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r13b +; CHECK-SSE-NEXT: orb %sil, %r13b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-SSE-NEXT: andb %r12b, %sil +; CHECK-SSE-NEXT: notb %r12b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r12b +; CHECK-SSE-NEXT: orb %sil, %r12b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-SSE-NEXT: andb %r15b, %sil +; CHECK-SSE-NEXT: notb %r15b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), 
%r15b +; CHECK-SSE-NEXT: orb %sil, %r15b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-SSE-NEXT: andb %r14b, %sil +; CHECK-SSE-NEXT: notb %r14b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r14b +; CHECK-SSE-NEXT: orb %sil, %r14b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-SSE-NEXT: andb %bpl, %sil +; CHECK-SSE-NEXT: notb %bpl +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %bpl +; CHECK-SSE-NEXT: orb %sil, %bpl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-SSE-NEXT: andb %r11b, %sil +; CHECK-SSE-NEXT: notb %r11b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r11b +; CHECK-SSE-NEXT: orb %sil, %r11b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-SSE-NEXT: andb %r10b, %sil +; CHECK-SSE-NEXT: notb %r10b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r10b +; CHECK-SSE-NEXT: orb %sil, %r10b +; CHECK-SSE-NEXT: movb %al, 15(%rdi) +; CHECK-SSE-NEXT: movb %cl, 14(%rdi) +; CHECK-SSE-NEXT: movb %dl, 13(%rdi) +; CHECK-SSE-NEXT: movb %bl, 12(%rdi) +; CHECK-SSE-NEXT: movb %r13b, 11(%rdi) +; CHECK-SSE-NEXT: movb %r12b, 10(%rdi) +; CHECK-SSE-NEXT: movb %r15b, 9(%rdi) +; CHECK-SSE-NEXT: movb %r14b, 8(%rdi) +; CHECK-SSE-NEXT: movb %bpl, 7(%rdi) +; CHECK-SSE-NEXT: movb %r11b, 6(%rdi) +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: andb %al, %r9b +; CHECK-SSE-NEXT: notb %al +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: orb %r9b, %al +; CHECK-SSE-NEXT: movb %r10b, 5(%rdi) +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %cl +; CHECK-SSE-NEXT: andb %cl, %r8b +; CHECK-SSE-NEXT: notb %cl +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-SSE-NEXT: orb %r8b, %cl +; CHECK-SSE-NEXT: movb %al, 4(%rdi) +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 4-byte Reload +; CHECK-SSE-NEXT: andb %al, %dl +; CHECK-SSE-NEXT: notb %al +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: orb %dl, %al +; CHECK-SSE-NEXT: movb %cl, 3(%rdi) +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %cl +; 
CHECK-SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 4-byte Reload +; CHECK-SSE-NEXT: andb %cl, %dl +; CHECK-SSE-NEXT: notb %cl +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-SSE-NEXT: orb %dl, %cl +; CHECK-SSE-NEXT: movb %al, 2(%rdi) +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 4-byte Reload +; CHECK-SSE-NEXT: andb %al, %dl +; CHECK-SSE-NEXT: notb %al +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: orb %dl, %al +; CHECK-SSE-NEXT: movb %cl, 1(%rdi) +; CHECK-SSE-NEXT: movb %al, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: popq %rbx +; CHECK-SSE-NEXT: popq %r12 +; CHECK-SSE-NEXT: popq %r13 +; CHECK-SSE-NEXT: popq %r14 +; CHECK-SSE-NEXT: popq %r15 +; CHECK-SSE-NEXT: popq %rbp +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_v16i8: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: andnps %xmm1, %xmm2 +; CHECK-SSE2-NEXT: orps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: retq + %mx = and <16 x i8> %x, %mask + %notmask = xor <16 x i8> %mask, + %my = and <16 x i8> %y, %notmask + %r = or <16 x i8> %mx, %my + ret <16 x i8> %r +} + +define <8 x i16> @out_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %mask) nounwind { +; CHECK-BASELINE-LABEL: out_v8i16: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: pushq %rbp +; CHECK-BASELINE-NEXT: pushq %r14 +; CHECK-BASELINE-NEXT: pushq %rbx +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %r10d +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %r11d +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %r14d +; CHECK-BASELINE-NEXT: movzwl {{[0-9]+}}(%rsp), %ebx +; CHECK-BASELINE-NEXT: andw %r14w, %bx +; CHECK-BASELINE-NEXT: notl %r14d +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %r14w +; CHECK-BASELINE-NEXT: orl %ebx, %r14d +; CHECK-BASELINE-NEXT: movzwl {{[0-9]+}}(%rsp), %ebx +; CHECK-BASELINE-NEXT: andw %r11w, %bx +; CHECK-BASELINE-NEXT: notl %r11d +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %r11w +; 
CHECK-BASELINE-NEXT: orl %ebx, %r11d +; CHECK-BASELINE-NEXT: movzwl {{[0-9]+}}(%rsp), %ebx +; CHECK-BASELINE-NEXT: andw %r10w, %bx +; CHECK-BASELINE-NEXT: notl %r10d +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %r10w +; CHECK-BASELINE-NEXT: orl %ebx, %r10d +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %ebx +; CHECK-BASELINE-NEXT: andl %ebx, %r9d +; CHECK-BASELINE-NEXT: notl %ebx +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %bx +; CHECK-BASELINE-NEXT: orl %r9d, %ebx +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-BASELINE-NEXT: andl %eax, %r8d +; CHECK-BASELINE-NEXT: notl %eax +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %ax +; CHECK-BASELINE-NEXT: orl %r8d, %eax +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %ebp +; CHECK-BASELINE-NEXT: andl %ebp, %ecx +; CHECK-BASELINE-NEXT: notl %ebp +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %bp +; CHECK-BASELINE-NEXT: orl %ecx, %ebp +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %ecx +; CHECK-BASELINE-NEXT: andl %ecx, %edx +; CHECK-BASELINE-NEXT: notl %ecx +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %cx +; CHECK-BASELINE-NEXT: orl %edx, %ecx +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %edx +; CHECK-BASELINE-NEXT: andl %edx, %esi +; CHECK-BASELINE-NEXT: notl %edx +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %dx +; CHECK-BASELINE-NEXT: orl %esi, %edx +; CHECK-BASELINE-NEXT: movw %r14w, 14(%rdi) +; CHECK-BASELINE-NEXT: movw %r11w, 12(%rdi) +; CHECK-BASELINE-NEXT: movw %r10w, 10(%rdi) +; CHECK-BASELINE-NEXT: movw %bx, 8(%rdi) +; CHECK-BASELINE-NEXT: movw %ax, 6(%rdi) +; CHECK-BASELINE-NEXT: movw %bp, 4(%rdi) +; CHECK-BASELINE-NEXT: movw %cx, 2(%rdi) +; CHECK-BASELINE-NEXT: movw %dx, (%rdi) +; CHECK-BASELINE-NEXT: movq %rdi, %rax +; CHECK-BASELINE-NEXT: popq %rbx +; CHECK-BASELINE-NEXT: popq %r14 +; CHECK-BASELINE-NEXT: popq %rbp +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: out_v8i16: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: pushq %rbp +; CHECK-SSE-NEXT: pushq %r14 +; CHECK-SSE-NEXT: pushq 
%rbx +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %r10d +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %r11d +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %r14d +; CHECK-SSE-NEXT: movzwl {{[0-9]+}}(%rsp), %ebx +; CHECK-SSE-NEXT: andw %r14w, %bx +; CHECK-SSE-NEXT: notl %r14d +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %r14w +; CHECK-SSE-NEXT: orl %ebx, %r14d +; CHECK-SSE-NEXT: movzwl {{[0-9]+}}(%rsp), %ebx +; CHECK-SSE-NEXT: andw %r11w, %bx +; CHECK-SSE-NEXT: notl %r11d +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %r11w +; CHECK-SSE-NEXT: orl %ebx, %r11d +; CHECK-SSE-NEXT: movzwl {{[0-9]+}}(%rsp), %ebx +; CHECK-SSE-NEXT: andw %r10w, %bx +; CHECK-SSE-NEXT: notl %r10d +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %r10w +; CHECK-SSE-NEXT: orl %ebx, %r10d +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %ebx +; CHECK-SSE-NEXT: andl %ebx, %r9d +; CHECK-SSE-NEXT: notl %ebx +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %bx +; CHECK-SSE-NEXT: orl %r9d, %ebx +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: andl %eax, %r8d +; CHECK-SSE-NEXT: notl %eax +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %ax +; CHECK-SSE-NEXT: orl %r8d, %eax +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %ebp +; CHECK-SSE-NEXT: andl %ebp, %ecx +; CHECK-SSE-NEXT: notl %ebp +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %bp +; CHECK-SSE-NEXT: orl %ecx, %ebp +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %ecx +; CHECK-SSE-NEXT: andl %ecx, %edx +; CHECK-SSE-NEXT: notl %ecx +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %cx +; CHECK-SSE-NEXT: orl %edx, %ecx +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %edx +; CHECK-SSE-NEXT: andl %edx, %esi +; CHECK-SSE-NEXT: notl %edx +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %dx +; CHECK-SSE-NEXT: orl %esi, %edx +; CHECK-SSE-NEXT: movw %r14w, 14(%rdi) +; CHECK-SSE-NEXT: movw %r11w, 12(%rdi) +; CHECK-SSE-NEXT: movw %r10w, 10(%rdi) +; CHECK-SSE-NEXT: movw %bx, 8(%rdi) +; CHECK-SSE-NEXT: movw %ax, 6(%rdi) +; CHECK-SSE-NEXT: movw %bp, 4(%rdi) +; CHECK-SSE-NEXT: movw %cx, 2(%rdi) +; CHECK-SSE-NEXT: movw %dx, (%rdi) 
+; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: popq %rbx +; CHECK-SSE-NEXT: popq %r14 +; CHECK-SSE-NEXT: popq %rbp +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_v8i16: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: andnps %xmm1, %xmm2 +; CHECK-SSE2-NEXT: orps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: retq + %mx = and <8 x i16> %x, %mask + %notmask = xor <8 x i16> %mask, + %my = and <8 x i16> %y, %notmask + %r = or <8 x i16> %mx, %my + ret <8 x i16> %r +} + +define <4 x i32> @out_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) nounwind { +; CHECK-BASELINE-LABEL: out_v4i32: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: pushq %rbx +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %r10d +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %r11d +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %ebx +; CHECK-BASELINE-NEXT: andl %ebx, %r8d +; CHECK-BASELINE-NEXT: andl %eax, %ecx +; CHECK-BASELINE-NEXT: andl %r11d, %edx +; CHECK-BASELINE-NEXT: andl %r10d, %esi +; CHECK-BASELINE-NEXT: notl %r11d +; CHECK-BASELINE-NEXT: notl %eax +; CHECK-BASELINE-NEXT: notl %ebx +; CHECK-BASELINE-NEXT: notl %r10d +; CHECK-BASELINE-NEXT: andl %r9d, %r10d +; CHECK-BASELINE-NEXT: orl %esi, %r10d +; CHECK-BASELINE-NEXT: andl {{[0-9]+}}(%rsp), %ebx +; CHECK-BASELINE-NEXT: orl %r8d, %ebx +; CHECK-BASELINE-NEXT: andl {{[0-9]+}}(%rsp), %eax +; CHECK-BASELINE-NEXT: orl %ecx, %eax +; CHECK-BASELINE-NEXT: andl {{[0-9]+}}(%rsp), %r11d +; CHECK-BASELINE-NEXT: orl %edx, %r11d +; CHECK-BASELINE-NEXT: movl %ebx, 12(%rdi) +; CHECK-BASELINE-NEXT: movl %eax, 8(%rdi) +; CHECK-BASELINE-NEXT: movl %r11d, 4(%rdi) +; CHECK-BASELINE-NEXT: movl %r10d, (%rdi) +; CHECK-BASELINE-NEXT: movq %rdi, %rax +; CHECK-BASELINE-NEXT: popq %rbx +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: out_v4i32: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: 
movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl %r9d, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl %r8d, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl %ecx, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl %edx, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl %esi, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; CHECK-SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0] +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1] +; CHECK-SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm1[0] +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; 
CHECK-SSE-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1] +; CHECK-SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm1[0] +; CHECK-SSE-NEXT: andps %xmm3, %xmm4 +; CHECK-SSE-NEXT: andnps %xmm2, %xmm3 +; CHECK-SSE-NEXT: orps %xmm4, %xmm3 +; CHECK-SSE-NEXT: movaps %xmm3, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_v4i32: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: andnps %xmm1, %xmm2 +; CHECK-SSE2-NEXT: orps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: retq + %mx = and <4 x i32> %x, %mask + %notmask = xor <4 x i32> %mask, + %my = and <4 x i32> %y, %notmask + %r = or <4 x i32> %mx, %my + ret <4 x i32> %r +} + +define <4 x i32> @out_v4i32_undef(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) nounwind { +; CHECK-BASELINE-LABEL: out_v4i32_undef: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %r10d +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %r11d +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-BASELINE-NEXT: andl %eax, %r8d +; CHECK-BASELINE-NEXT: andl %r11d, %edx +; CHECK-BASELINE-NEXT: andl %r10d, %esi +; CHECK-BASELINE-NEXT: andl {{[0-9]+}}(%rsp), %ecx +; CHECK-BASELINE-NEXT: notl %r11d +; CHECK-BASELINE-NEXT: notl %eax +; CHECK-BASELINE-NEXT: notl %r10d +; CHECK-BASELINE-NEXT: andl %r9d, %r10d +; CHECK-BASELINE-NEXT: orl %esi, %r10d +; CHECK-BASELINE-NEXT: andl {{[0-9]+}}(%rsp), %eax +; CHECK-BASELINE-NEXT: orl %r8d, %eax +; CHECK-BASELINE-NEXT: andl {{[0-9]+}}(%rsp), %r11d +; CHECK-BASELINE-NEXT: orl %edx, %r11d +; CHECK-BASELINE-NEXT: movl %ecx, 8(%rdi) +; CHECK-BASELINE-NEXT: movl %eax, 12(%rdi) +; CHECK-BASELINE-NEXT: movl %r11d, 4(%rdi) +; CHECK-BASELINE-NEXT: movl %r10d, (%rdi) +; CHECK-BASELINE-NEXT: movq %rdi, %rax +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: out_v4i32_undef: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: 
movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl %r9d, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl %r8d, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl %ecx, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl %edx, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl %esi, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; CHECK-SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0] +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1] +; CHECK-SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm1[0] +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; CHECK-SSE-NEXT: movss 
{{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1] +; CHECK-SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm1[0] +; CHECK-SSE-NEXT: andps %xmm3, %xmm4 +; CHECK-SSE-NEXT: andnps %xmm2, %xmm3 +; CHECK-SSE-NEXT: orps %xmm4, %xmm3 +; CHECK-SSE-NEXT: movaps %xmm3, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_v4i32_undef: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: andnps %xmm1, %xmm2 +; CHECK-SSE2-NEXT: orps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: retq + %mx = and <4 x i32> %x, %mask + %notmask = xor <4 x i32> %mask, + %my = and <4 x i32> %y, %notmask + %r = or <4 x i32> %mx, %my + ret <4 x i32> %r +} + +define <2 x i64> @out_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %mask) nounwind { +; CHECK-BASELINE-LABEL: out_v2i64: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: andq %r9, %rsi +; CHECK-BASELINE-NEXT: andq %r8, %rdi +; CHECK-BASELINE-NEXT: notq %r8 +; CHECK-BASELINE-NEXT: notq %r9 +; CHECK-BASELINE-NEXT: andq %rcx, %r9 +; CHECK-BASELINE-NEXT: orq %rsi, %r9 +; CHECK-BASELINE-NEXT: andq %rdx, %r8 +; CHECK-BASELINE-NEXT: orq %rdi, %r8 +; CHECK-BASELINE-NEXT: movq %r8, %rax +; CHECK-BASELINE-NEXT: movq %r9, %rdx +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: out_v2i64: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: andq %r9, %rsi +; CHECK-SSE-NEXT: andq %r8, %rdi +; CHECK-SSE-NEXT: notq %r8 +; CHECK-SSE-NEXT: notq %r9 +; CHECK-SSE-NEXT: andq %rcx, %r9 +; CHECK-SSE-NEXT: orq %rsi, %r9 +; CHECK-SSE-NEXT: andq %rdx, %r8 +; CHECK-SSE-NEXT: orq %rdi, %r8 +; CHECK-SSE-NEXT: movq %r8, %rax +; CHECK-SSE-NEXT: movq %r9, %rdx +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: out_v2i64: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: andnps %xmm1, %xmm2 +; CHECK-SSE2-NEXT: orps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: retq + %mx = and <2 x i64> %x, 
%mask + %notmask = xor <2 x i64> %mask, + %my = and <2 x i64> %y, %notmask + %r = or <2 x i64> %mx, %my + ret <2 x i64> %r +} + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; Should be the same as the previous one. +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +; ============================================================================ ; +; 8-bit vector width +; ============================================================================ ; + +define <1 x i8> @in_v1i8(<1 x i8> %x, <1 x i8> %y, <1 x i8> %mask) nounwind { +; CHECK-BASELINE-LABEL: in_v1i8: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: xorl %esi, %edi +; CHECK-BASELINE-NEXT: andl %edx, %edi +; CHECK-BASELINE-NEXT: xorl %esi, %edi +; CHECK-BASELINE-NEXT: movl %edi, %eax +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: in_v1i8: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: xorl %esi, %edi +; CHECK-SSE-NEXT: andl %edx, %edi +; CHECK-SSE-NEXT: xorl %esi, %edi +; CHECK-SSE-NEXT: movl %edi, %eax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_v1i8: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: xorl %esi, %edi +; CHECK-SSE2-NEXT: andl %edx, %edi +; CHECK-SSE2-NEXT: xorl %esi, %edi +; CHECK-SSE2-NEXT: movl %edi, %eax +; CHECK-SSE2-NEXT: retq + %n0 = xor <1 x i8> %x, %y + %n1 = and <1 x i8> %n0, %mask + %r = xor <1 x i8> %n1, %y + ret <1 x i8> %r +} + +; ============================================================================ ; +; 16-bit vector width +; ============================================================================ ; + +define <2 x i8> @in_v2i8(<2 x i8> %x, <2 x i8> %y, <2 x i8> %mask) nounwind { +; CHECK-BASELINE-LABEL: in_v2i8: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: xorl %ecx, %esi +; CHECK-BASELINE-NEXT: xorl %edx, %edi +; CHECK-BASELINE-NEXT: andl %r8d, %edi +; CHECK-BASELINE-NEXT: andl %r9d, %esi +; CHECK-BASELINE-NEXT: xorl %ecx, %esi +; CHECK-BASELINE-NEXT: xorl %edx, %edi +; 
CHECK-BASELINE-NEXT: movl %edi, %eax +; CHECK-BASELINE-NEXT: movl %esi, %edx +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: in_v2i8: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: xorl %ecx, %esi +; CHECK-SSE-NEXT: xorl %edx, %edi +; CHECK-SSE-NEXT: andl %r8d, %edi +; CHECK-SSE-NEXT: andl %r9d, %esi +; CHECK-SSE-NEXT: xorl %ecx, %esi +; CHECK-SSE-NEXT: xorl %edx, %edi +; CHECK-SSE-NEXT: movl %edi, %eax +; CHECK-SSE-NEXT: movl %esi, %edx +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_v2i8: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %n0 = xor <2 x i8> %x, %y + %n1 = and <2 x i8> %n0, %mask + %r = xor <2 x i8> %n1, %y + ret <2 x i8> %r +} + +define <1 x i16> @in_v1i16(<1 x i16> %x, <1 x i16> %y, <1 x i16> %mask) nounwind { +; CHECK-BASELINE-LABEL: in_v1i16: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: xorl %esi, %edi +; CHECK-BASELINE-NEXT: andl %edx, %edi +; CHECK-BASELINE-NEXT: xorl %esi, %edi +; CHECK-BASELINE-NEXT: movl %edi, %eax +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: in_v1i16: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: xorl %esi, %edi +; CHECK-SSE-NEXT: andl %edx, %edi +; CHECK-SSE-NEXT: xorl %esi, %edi +; CHECK-SSE-NEXT: movl %edi, %eax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_v1i16: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: xorl %esi, %edi +; CHECK-SSE2-NEXT: andl %edx, %edi +; CHECK-SSE2-NEXT: xorl %esi, %edi +; CHECK-SSE2-NEXT: movl %edi, %eax +; CHECK-SSE2-NEXT: retq + %n0 = xor <1 x i16> %x, %y + %n1 = and <1 x i16> %n0, %mask + %r = xor <1 x i16> %n1, %y + ret <1 x i16> %r +} + +; ============================================================================ ; +; 32-bit vector width +; ============================================================================ ; + +define <4 x i8> @in_v4i8(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwind { +; CHECK-BASELINE-LABEL: in_v4i8: +; CHECK-BASELINE: # 
%bb.0: +; CHECK-BASELINE-NEXT: xorl %r9d, %esi +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r10b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r11b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: xorb %al, %dl +; CHECK-BASELINE-NEXT: xorb %r11b, %cl +; CHECK-BASELINE-NEXT: xorb %r10b, %r8b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r8b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %dl +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %sil +; CHECK-BASELINE-NEXT: xorb %r9b, %sil +; CHECK-BASELINE-NEXT: xorb %al, %dl +; CHECK-BASELINE-NEXT: xorb %r11b, %cl +; CHECK-BASELINE-NEXT: xorb %r10b, %r8b +; CHECK-BASELINE-NEXT: movb %r8b, 3(%rdi) +; CHECK-BASELINE-NEXT: movb %cl, 2(%rdi) +; CHECK-BASELINE-NEXT: movb %dl, 1(%rdi) +; CHECK-BASELINE-NEXT: movb %sil, (%rdi) +; CHECK-BASELINE-NEXT: movq %rdi, %rax +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: in_v4i8: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: xorl %r9d, %esi +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r10b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r11b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: xorb %al, %dl +; CHECK-SSE-NEXT: xorb %r11b, %cl +; CHECK-SSE-NEXT: xorb %r10b, %r8b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r8b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %dl +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %sil +; CHECK-SSE-NEXT: xorb %r9b, %sil +; CHECK-SSE-NEXT: xorb %al, %dl +; CHECK-SSE-NEXT: xorb %r11b, %cl +; CHECK-SSE-NEXT: xorb %r10b, %r8b +; CHECK-SSE-NEXT: movb %r8b, 3(%rdi) +; CHECK-SSE-NEXT: movb %cl, 2(%rdi) +; CHECK-SSE-NEXT: movb %dl, 1(%rdi) +; CHECK-SSE-NEXT: movb %sil, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_v4i8: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %n0 
= xor <4 x i8> %x, %y + %n1 = and <4 x i8> %n0, %mask + %r = xor <4 x i8> %n1, %y + ret <4 x i8> %r +} + +define <2 x i16> @in_v2i16(<2 x i16> %x, <2 x i16> %y, <2 x i16> %mask) nounwind { +; CHECK-BASELINE-LABEL: in_v2i16: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: xorl %ecx, %esi +; CHECK-BASELINE-NEXT: xorl %edx, %edi +; CHECK-BASELINE-NEXT: andl %r8d, %edi +; CHECK-BASELINE-NEXT: andl %r9d, %esi +; CHECK-BASELINE-NEXT: xorl %ecx, %esi +; CHECK-BASELINE-NEXT: xorl %edx, %edi +; CHECK-BASELINE-NEXT: movl %edi, %eax +; CHECK-BASELINE-NEXT: movl %esi, %edx +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: in_v2i16: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: xorl %ecx, %esi +; CHECK-SSE-NEXT: xorl %edx, %edi +; CHECK-SSE-NEXT: andl %r8d, %edi +; CHECK-SSE-NEXT: andl %r9d, %esi +; CHECK-SSE-NEXT: xorl %ecx, %esi +; CHECK-SSE-NEXT: xorl %edx, %edi +; CHECK-SSE-NEXT: movl %edi, %eax +; CHECK-SSE-NEXT: movl %esi, %edx +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_v2i16: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %n0 = xor <2 x i16> %x, %y + %n1 = and <2 x i16> %n0, %mask + %r = xor <2 x i16> %n1, %y + ret <2 x i16> %r +} + +define <1 x i32> @in_v1i32(<1 x i32> %x, <1 x i32> %y, <1 x i32> %mask) nounwind { +; CHECK-BASELINE-LABEL: in_v1i32: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: xorl %esi, %edi +; CHECK-BASELINE-NEXT: andl %edx, %edi +; CHECK-BASELINE-NEXT: xorl %esi, %edi +; CHECK-BASELINE-NEXT: movl %edi, %eax +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: in_v1i32: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: xorl %esi, %edi +; CHECK-SSE-NEXT: andl %edx, %edi +; CHECK-SSE-NEXT: xorl %esi, %edi +; CHECK-SSE-NEXT: movl %edi, %eax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_v1i32: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: xorl %esi, %edi +; CHECK-SSE2-NEXT: andl %edx, %edi +; CHECK-SSE2-NEXT: xorl %esi, %edi 
+; CHECK-SSE2-NEXT: movl %edi, %eax +; CHECK-SSE2-NEXT: retq + %n0 = xor <1 x i32> %x, %y + %n1 = and <1 x i32> %n0, %mask + %r = xor <1 x i32> %n1, %y + ret <1 x i32> %r +} + +; ============================================================================ ; +; 64-bit vector width +; ============================================================================ ; + +define <8 x i8> @in_v8i8(<8 x i8> %x, <8 x i8> %y, <8 x i8> %mask) nounwind { +; CHECK-BASELINE-LABEL: in_v8i8: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: pushq %rbp +; CHECK-BASELINE-NEXT: pushq %r15 +; CHECK-BASELINE-NEXT: pushq %r14 +; CHECK-BASELINE-NEXT: pushq %r13 +; CHECK-BASELINE-NEXT: pushq %r12 +; CHECK-BASELINE-NEXT: pushq %rbx +; CHECK-BASELINE-NEXT: movl %ecx, %r10d +; CHECK-BASELINE-NEXT: movl %edx, %r11d +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %bl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r14b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r15b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r12b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r13b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %bpl +; CHECK-BASELINE-NEXT: xorb %bpl, %sil +; CHECK-BASELINE-NEXT: xorb %r13b, %r11b +; CHECK-BASELINE-NEXT: xorb %r12b, %r10b +; CHECK-BASELINE-NEXT: xorb %r15b, %r8b +; CHECK-BASELINE-NEXT: xorb %r14b, %r9b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %dl +; CHECK-BASELINE-NEXT: xorb {{[0-9]+}}(%rsp), %dl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %cl +; CHECK-BASELINE-NEXT: xorb {{[0-9]+}}(%rsp), %cl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: xorb %bl, %al +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r9b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r8b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r10b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r11b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %sil +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-BASELINE-NEXT: 
andb {{[0-9]+}}(%rsp), %dl +; CHECK-BASELINE-NEXT: xorb %bpl, %sil +; CHECK-BASELINE-NEXT: xorb %r13b, %r11b +; CHECK-BASELINE-NEXT: xorb %r12b, %r10b +; CHECK-BASELINE-NEXT: xorb %r15b, %r8b +; CHECK-BASELINE-NEXT: xorb %r14b, %r9b +; CHECK-BASELINE-NEXT: xorb {{[0-9]+}}(%rsp), %dl +; CHECK-BASELINE-NEXT: xorb {{[0-9]+}}(%rsp), %cl +; CHECK-BASELINE-NEXT: xorb %bl, %al +; CHECK-BASELINE-NEXT: movb %al, 7(%rdi) +; CHECK-BASELINE-NEXT: movb %cl, 6(%rdi) +; CHECK-BASELINE-NEXT: movb %dl, 5(%rdi) +; CHECK-BASELINE-NEXT: movb %r9b, 4(%rdi) +; CHECK-BASELINE-NEXT: movb %r8b, 3(%rdi) +; CHECK-BASELINE-NEXT: movb %r10b, 2(%rdi) +; CHECK-BASELINE-NEXT: movb %r11b, 1(%rdi) +; CHECK-BASELINE-NEXT: movb %sil, (%rdi) +; CHECK-BASELINE-NEXT: movq %rdi, %rax +; CHECK-BASELINE-NEXT: popq %rbx +; CHECK-BASELINE-NEXT: popq %r12 +; CHECK-BASELINE-NEXT: popq %r13 +; CHECK-BASELINE-NEXT: popq %r14 +; CHECK-BASELINE-NEXT: popq %r15 +; CHECK-BASELINE-NEXT: popq %rbp +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: in_v8i8: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: pushq %rbp +; CHECK-SSE-NEXT: pushq %r15 +; CHECK-SSE-NEXT: pushq %r14 +; CHECK-SSE-NEXT: pushq %r13 +; CHECK-SSE-NEXT: pushq %r12 +; CHECK-SSE-NEXT: pushq %rbx +; CHECK-SSE-NEXT: movl %ecx, %r10d +; CHECK-SSE-NEXT: movl %edx, %r11d +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %bl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r14b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r15b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r12b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r13b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %bpl +; CHECK-SSE-NEXT: xorb %bpl, %sil +; CHECK-SSE-NEXT: xorb %r13b, %r11b +; CHECK-SSE-NEXT: xorb %r12b, %r10b +; CHECK-SSE-NEXT: xorb %r15b, %r8b +; CHECK-SSE-NEXT: xorb %r14b, %r9b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %dl +; CHECK-SSE-NEXT: xorb {{[0-9]+}}(%rsp), %dl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %cl +; CHECK-SSE-NEXT: xorb {{[0-9]+}}(%rsp), %cl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %al +; 
CHECK-SSE-NEXT: xorb %bl, %al +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r9b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r8b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r10b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r11b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %sil +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %dl +; CHECK-SSE-NEXT: xorb %bpl, %sil +; CHECK-SSE-NEXT: xorb %r13b, %r11b +; CHECK-SSE-NEXT: xorb %r12b, %r10b +; CHECK-SSE-NEXT: xorb %r15b, %r8b +; CHECK-SSE-NEXT: xorb %r14b, %r9b +; CHECK-SSE-NEXT: xorb {{[0-9]+}}(%rsp), %dl +; CHECK-SSE-NEXT: xorb {{[0-9]+}}(%rsp), %cl +; CHECK-SSE-NEXT: xorb %bl, %al +; CHECK-SSE-NEXT: movb %al, 7(%rdi) +; CHECK-SSE-NEXT: movb %cl, 6(%rdi) +; CHECK-SSE-NEXT: movb %dl, 5(%rdi) +; CHECK-SSE-NEXT: movb %r9b, 4(%rdi) +; CHECK-SSE-NEXT: movb %r8b, 3(%rdi) +; CHECK-SSE-NEXT: movb %r10b, 2(%rdi) +; CHECK-SSE-NEXT: movb %r11b, 1(%rdi) +; CHECK-SSE-NEXT: movb %sil, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: popq %rbx +; CHECK-SSE-NEXT: popq %r12 +; CHECK-SSE-NEXT: popq %r13 +; CHECK-SSE-NEXT: popq %r14 +; CHECK-SSE-NEXT: popq %r15 +; CHECK-SSE-NEXT: popq %rbp +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_v8i8: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %n0 = xor <8 x i8> %x, %y + %n1 = and <8 x i8> %n0, %mask + %r = xor <8 x i8> %n1, %y + ret <8 x i8> %r +} + +define <4 x i16> @in_v4i16(<4 x i16> %x, <4 x i16> %y, <4 x i16> %mask) nounwind { +; CHECK-BASELINE-LABEL: in_v4i16: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %r10d +; CHECK-BASELINE-NEXT: xorl %r10d, %r8d +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %r11d +; CHECK-BASELINE-NEXT: xorl %r11d, %ecx +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-BASELINE-NEXT: xorl %eax, %edx +; 
CHECK-BASELINE-NEXT: xorl %r9d, %esi +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %r8w +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %cx +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %dx +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %si +; CHECK-BASELINE-NEXT: xorl %r9d, %esi +; CHECK-BASELINE-NEXT: xorl %eax, %edx +; CHECK-BASELINE-NEXT: xorl %r11d, %ecx +; CHECK-BASELINE-NEXT: xorl %r10d, %r8d +; CHECK-BASELINE-NEXT: movw %r8w, 6(%rdi) +; CHECK-BASELINE-NEXT: movw %cx, 4(%rdi) +; CHECK-BASELINE-NEXT: movw %dx, 2(%rdi) +; CHECK-BASELINE-NEXT: movw %si, (%rdi) +; CHECK-BASELINE-NEXT: movq %rdi, %rax +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: in_v4i16: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %r10d +; CHECK-SSE-NEXT: xorl %r10d, %r8d +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %r11d +; CHECK-SSE-NEXT: xorl %r11d, %ecx +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: xorl %eax, %edx +; CHECK-SSE-NEXT: xorl %r9d, %esi +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %r8w +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %cx +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %dx +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %si +; CHECK-SSE-NEXT: xorl %r9d, %esi +; CHECK-SSE-NEXT: xorl %eax, %edx +; CHECK-SSE-NEXT: xorl %r11d, %ecx +; CHECK-SSE-NEXT: xorl %r10d, %r8d +; CHECK-SSE-NEXT: movw %r8w, 6(%rdi) +; CHECK-SSE-NEXT: movw %cx, 4(%rdi) +; CHECK-SSE-NEXT: movw %dx, 2(%rdi) +; CHECK-SSE-NEXT: movw %si, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_v4i16: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %n0 = xor <4 x i16> %x, %y + %n1 = and <4 x i16> %n0, %mask + %r = xor <4 x i16> %n1, %y + ret <4 x i16> %r +} + +define <2 x i32> @in_v2i32(<2 x i32> %x, <2 x i32> %y, <2 x i32> %mask) nounwind { +; CHECK-BASELINE-LABEL: in_v2i32: +; CHECK-BASELINE: # %bb.0: +; 
CHECK-BASELINE-NEXT: xorl %edx, %edi +; CHECK-BASELINE-NEXT: xorl %ecx, %esi +; CHECK-BASELINE-NEXT: andl %r9d, %esi +; CHECK-BASELINE-NEXT: andl %r8d, %edi +; CHECK-BASELINE-NEXT: xorl %edx, %edi +; CHECK-BASELINE-NEXT: xorl %ecx, %esi +; CHECK-BASELINE-NEXT: movl %edi, %eax +; CHECK-BASELINE-NEXT: movl %esi, %edx +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: in_v2i32: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: xorl %edx, %edi +; CHECK-SSE-NEXT: xorl %ecx, %esi +; CHECK-SSE-NEXT: andl %r9d, %esi +; CHECK-SSE-NEXT: andl %r8d, %edi +; CHECK-SSE-NEXT: xorl %edx, %edi +; CHECK-SSE-NEXT: xorl %ecx, %esi +; CHECK-SSE-NEXT: movl %edi, %eax +; CHECK-SSE-NEXT: movl %esi, %edx +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_v2i32: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %n0 = xor <2 x i32> %x, %y + %n1 = and <2 x i32> %n0, %mask + %r = xor <2 x i32> %n1, %y + ret <2 x i32> %r +} + +define <1 x i64> @in_v1i64(<1 x i64> %x, <1 x i64> %y, <1 x i64> %mask) nounwind { +; CHECK-BASELINE-LABEL: in_v1i64: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: xorq %rsi, %rdi +; CHECK-BASELINE-NEXT: andq %rdx, %rdi +; CHECK-BASELINE-NEXT: xorq %rsi, %rdi +; CHECK-BASELINE-NEXT: movq %rdi, %rax +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: in_v1i64: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: xorq %rsi, %rdi +; CHECK-SSE-NEXT: andq %rdx, %rdi +; CHECK-SSE-NEXT: xorq %rsi, %rdi +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_v1i64: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: xorq %rsi, %rdi +; CHECK-SSE2-NEXT: andq %rdx, %rdi +; CHECK-SSE2-NEXT: xorq %rsi, %rdi +; CHECK-SSE2-NEXT: movq %rdi, %rax +; CHECK-SSE2-NEXT: retq + %n0 = xor <1 x i64> %x, %y + %n1 = and <1 x i64> %n0, %mask + %r = xor <1 x i64> %n1, %y + ret <1 x i64> %r +} + +; ============================================================================ 
; +; 128-bit vector width +; ============================================================================ ; + +define <16 x i8> @in_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %mask) nounwind { +; CHECK-BASELINE-LABEL: in_v16i8: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: pushq %rbp +; CHECK-BASELINE-NEXT: pushq %r15 +; CHECK-BASELINE-NEXT: pushq %r14 +; CHECK-BASELINE-NEXT: pushq %r13 +; CHECK-BASELINE-NEXT: pushq %r12 +; CHECK-BASELINE-NEXT: pushq %rbx +; CHECK-BASELINE-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-BASELINE-NEXT: movl %edx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-BASELINE-NEXT: movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %cl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r14b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r15b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r12b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r13b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %bpl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %bl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r11b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r10b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: xorb %al, %r9b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r9b +; CHECK-BASELINE-NEXT: xorb %al, %r9b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %dl +; CHECK-BASELINE-NEXT: xorb %r10b, %dl +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %dl +; CHECK-BASELINE-NEXT: xorb %r10b, %dl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r10b +; CHECK-BASELINE-NEXT: xorb %r11b, %r10b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r10b +; CHECK-BASELINE-NEXT: xorb %r11b, %r10b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r11b +; CHECK-BASELINE-NEXT: xorb %bl, %r11b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r11b +; CHECK-BASELINE-NEXT: xorb %bl, %r11b +; CHECK-BASELINE-NEXT: movb 
{{[0-9]+}}(%rsp), %bl +; CHECK-BASELINE-NEXT: xorb %bpl, %bl +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %bl +; CHECK-BASELINE-NEXT: xorb %bpl, %bl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %bpl +; CHECK-BASELINE-NEXT: xorb %r13b, %bpl +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %bpl +; CHECK-BASELINE-NEXT: xorb %r13b, %bpl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r13b +; CHECK-BASELINE-NEXT: xorb %r12b, %r13b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r13b +; CHECK-BASELINE-NEXT: xorb %r12b, %r13b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r12b +; CHECK-BASELINE-NEXT: xorb %r15b, %r12b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r12b +; CHECK-BASELINE-NEXT: xorb %r15b, %r12b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r15b +; CHECK-BASELINE-NEXT: xorb %r14b, %r15b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r15b +; CHECK-BASELINE-NEXT: xorb %r14b, %r15b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %r14b +; CHECK-BASELINE-NEXT: xorb %sil, %r14b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r14b +; CHECK-BASELINE-NEXT: xorb %sil, %r14b +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: xorb %cl, %al +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: xorb %cl, %al +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %cl +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-BASELINE-NEXT: xorb %sil, %cl +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-BASELINE-NEXT: xorb %sil, %cl +; CHECK-BASELINE-NEXT: movb %cl, 15(%rdi) +; CHECK-BASELINE-NEXT: movb %al, 14(%rdi) +; CHECK-BASELINE-NEXT: movb %r14b, 13(%rdi) +; CHECK-BASELINE-NEXT: movb %r15b, 12(%rdi) +; CHECK-BASELINE-NEXT: movb %r12b, 11(%rdi) +; CHECK-BASELINE-NEXT: movb %r13b, 10(%rdi) +; CHECK-BASELINE-NEXT: movb %bpl, 9(%rdi) +; CHECK-BASELINE-NEXT: movb %bl, 8(%rdi) +; CHECK-BASELINE-NEXT: movb %r11b, 7(%rdi) +; CHECK-BASELINE-NEXT: movb %r10b, 6(%rdi) +; CHECK-BASELINE-NEXT: movb %dl, 5(%rdi) +; 
CHECK-BASELINE-NEXT: movb %r9b, 4(%rdi) +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: xorb %al, %r8b +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %r8b +; CHECK-BASELINE-NEXT: xorb %al, %r8b +; CHECK-BASELINE-NEXT: movb %r8b, 3(%rdi) +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 4-byte Reload +; CHECK-BASELINE-NEXT: xorb %al, %cl +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-BASELINE-NEXT: xorb %al, %cl +; CHECK-BASELINE-NEXT: movb %cl, 2(%rdi) +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 4-byte Reload +; CHECK-BASELINE-NEXT: xorb %al, %cl +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-BASELINE-NEXT: xorb %al, %cl +; CHECK-BASELINE-NEXT: movb %cl, 1(%rdi) +; CHECK-BASELINE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-BASELINE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 4-byte Reload +; CHECK-BASELINE-NEXT: xorb %al, %cl +; CHECK-BASELINE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-BASELINE-NEXT: xorb %al, %cl +; CHECK-BASELINE-NEXT: movb %cl, (%rdi) +; CHECK-BASELINE-NEXT: movq %rdi, %rax +; CHECK-BASELINE-NEXT: popq %rbx +; CHECK-BASELINE-NEXT: popq %r12 +; CHECK-BASELINE-NEXT: popq %r13 +; CHECK-BASELINE-NEXT: popq %r14 +; CHECK-BASELINE-NEXT: popq %r15 +; CHECK-BASELINE-NEXT: popq %rbp +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: in_v16i8: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: pushq %rbp +; CHECK-SSE-NEXT: pushq %r15 +; CHECK-SSE-NEXT: pushq %r14 +; CHECK-SSE-NEXT: pushq %r13 +; CHECK-SSE-NEXT: pushq %r12 +; CHECK-SSE-NEXT: pushq %rbx +; CHECK-SSE-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movl %edx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %cl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-SSE-NEXT: movb 
{{[0-9]+}}(%rsp), %r14b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r15b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r12b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r13b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %bpl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %bl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r11b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r10b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: xorb %al, %r9b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r9b +; CHECK-SSE-NEXT: xorb %al, %r9b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %dl +; CHECK-SSE-NEXT: xorb %r10b, %dl +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %dl +; CHECK-SSE-NEXT: xorb %r10b, %dl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r10b +; CHECK-SSE-NEXT: xorb %r11b, %r10b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r10b +; CHECK-SSE-NEXT: xorb %r11b, %r10b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r11b +; CHECK-SSE-NEXT: xorb %bl, %r11b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r11b +; CHECK-SSE-NEXT: xorb %bl, %r11b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %bl +; CHECK-SSE-NEXT: xorb %bpl, %bl +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %bl +; CHECK-SSE-NEXT: xorb %bpl, %bl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %bpl +; CHECK-SSE-NEXT: xorb %r13b, %bpl +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %bpl +; CHECK-SSE-NEXT: xorb %r13b, %bpl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r13b +; CHECK-SSE-NEXT: xorb %r12b, %r13b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r13b +; CHECK-SSE-NEXT: xorb %r12b, %r13b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r12b +; CHECK-SSE-NEXT: xorb %r15b, %r12b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r12b +; CHECK-SSE-NEXT: xorb %r15b, %r12b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r15b +; CHECK-SSE-NEXT: xorb %r14b, %r15b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r15b +; CHECK-SSE-NEXT: xorb %r14b, %r15b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %r14b +; CHECK-SSE-NEXT: xorb %sil, %r14b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r14b +; 
CHECK-SSE-NEXT: xorb %sil, %r14b +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: xorb %cl, %al +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: xorb %cl, %al +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %cl +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %sil +; CHECK-SSE-NEXT: xorb %sil, %cl +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-SSE-NEXT: xorb %sil, %cl +; CHECK-SSE-NEXT: movb %cl, 15(%rdi) +; CHECK-SSE-NEXT: movb %al, 14(%rdi) +; CHECK-SSE-NEXT: movb %r14b, 13(%rdi) +; CHECK-SSE-NEXT: movb %r15b, 12(%rdi) +; CHECK-SSE-NEXT: movb %r12b, 11(%rdi) +; CHECK-SSE-NEXT: movb %r13b, 10(%rdi) +; CHECK-SSE-NEXT: movb %bpl, 9(%rdi) +; CHECK-SSE-NEXT: movb %bl, 8(%rdi) +; CHECK-SSE-NEXT: movb %r11b, 7(%rdi) +; CHECK-SSE-NEXT: movb %r10b, 6(%rdi) +; CHECK-SSE-NEXT: movb %dl, 5(%rdi) +; CHECK-SSE-NEXT: movb %r9b, 4(%rdi) +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: xorb %al, %r8b +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %r8b +; CHECK-SSE-NEXT: xorb %al, %r8b +; CHECK-SSE-NEXT: movb %r8b, 3(%rdi) +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 4-byte Reload +; CHECK-SSE-NEXT: xorb %al, %cl +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-SSE-NEXT: xorb %al, %cl +; CHECK-SSE-NEXT: movb %cl, 2(%rdi) +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 4-byte Reload +; CHECK-SSE-NEXT: xorb %al, %cl +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-SSE-NEXT: xorb %al, %cl +; CHECK-SSE-NEXT: movb %cl, 1(%rdi) +; CHECK-SSE-NEXT: movb {{[0-9]+}}(%rsp), %al +; CHECK-SSE-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 4-byte Reload +; CHECK-SSE-NEXT: xorb %al, %cl +; CHECK-SSE-NEXT: andb {{[0-9]+}}(%rsp), %cl +; CHECK-SSE-NEXT: xorb %al, %cl +; CHECK-SSE-NEXT: movb %cl, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: popq %rbx +; CHECK-SSE-NEXT: popq %r12 +; CHECK-SSE-NEXT: popq %r13 +; CHECK-SSE-NEXT: 
popq %r14 +; CHECK-SSE-NEXT: popq %r15 +; CHECK-SSE-NEXT: popq %rbp +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_v16i8: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %n0 = xor <16 x i8> %x, %y + %n1 = and <16 x i8> %n0, %mask + %r = xor <16 x i8> %n1, %y + ret <16 x i8> %r +} + +define <8 x i16> @in_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %mask) nounwind { +; CHECK-BASELINE-LABEL: in_v8i16: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: pushq %rbp +; CHECK-BASELINE-NEXT: pushq %r14 +; CHECK-BASELINE-NEXT: pushq %rbx +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %r10d +; CHECK-BASELINE-NEXT: xorl %r10d, %r9d +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %r11d +; CHECK-BASELINE-NEXT: xorl %r11d, %r8d +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-BASELINE-NEXT: xorl %eax, %ecx +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %ebx +; CHECK-BASELINE-NEXT: xorl %ebx, %esi +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %si +; CHECK-BASELINE-NEXT: xorl %ebx, %esi +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %ebx +; CHECK-BASELINE-NEXT: xorl %ebx, %edx +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %dx +; CHECK-BASELINE-NEXT: xorl %ebx, %edx +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %r14d +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %cx +; CHECK-BASELINE-NEXT: xorl %eax, %ecx +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %r8w +; CHECK-BASELINE-NEXT: xorl %r11d, %r8d +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %ebx +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %r9w +; CHECK-BASELINE-NEXT: xorl %r10d, %r9d +; CHECK-BASELINE-NEXT: movzwl {{[0-9]+}}(%rsp), %ebp +; CHECK-BASELINE-NEXT: xorw %bx, %bp +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %bp +; CHECK-BASELINE-NEXT: xorl %ebx, %ebp +; CHECK-BASELINE-NEXT: movzwl {{[0-9]+}}(%rsp), %ebx +; CHECK-BASELINE-NEXT: 
xorw %ax, %bx +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %bx +; CHECK-BASELINE-NEXT: xorl %eax, %ebx +; CHECK-BASELINE-NEXT: movzwl {{[0-9]+}}(%rsp), %eax +; CHECK-BASELINE-NEXT: xorw %r14w, %ax +; CHECK-BASELINE-NEXT: andw {{[0-9]+}}(%rsp), %ax +; CHECK-BASELINE-NEXT: xorl %r14d, %eax +; CHECK-BASELINE-NEXT: movw %ax, 14(%rdi) +; CHECK-BASELINE-NEXT: movw %bx, 12(%rdi) +; CHECK-BASELINE-NEXT: movw %bp, 10(%rdi) +; CHECK-BASELINE-NEXT: movw %r9w, 8(%rdi) +; CHECK-BASELINE-NEXT: movw %r8w, 6(%rdi) +; CHECK-BASELINE-NEXT: movw %cx, 4(%rdi) +; CHECK-BASELINE-NEXT: movw %dx, 2(%rdi) +; CHECK-BASELINE-NEXT: movw %si, (%rdi) +; CHECK-BASELINE-NEXT: movq %rdi, %rax +; CHECK-BASELINE-NEXT: popq %rbx +; CHECK-BASELINE-NEXT: popq %r14 +; CHECK-BASELINE-NEXT: popq %rbp +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: in_v8i16: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: pushq %rbp +; CHECK-SSE-NEXT: pushq %r14 +; CHECK-SSE-NEXT: pushq %rbx +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %r10d +; CHECK-SSE-NEXT: xorl %r10d, %r9d +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %r11d +; CHECK-SSE-NEXT: xorl %r11d, %r8d +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: xorl %eax, %ecx +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %ebx +; CHECK-SSE-NEXT: xorl %ebx, %esi +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %si +; CHECK-SSE-NEXT: xorl %ebx, %esi +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %ebx +; CHECK-SSE-NEXT: xorl %ebx, %edx +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %dx +; CHECK-SSE-NEXT: xorl %ebx, %edx +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %r14d +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %cx +; CHECK-SSE-NEXT: xorl %eax, %ecx +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %r8w +; CHECK-SSE-NEXT: xorl %r11d, %r8d +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %ebx +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %r9w +; CHECK-SSE-NEXT: xorl %r10d, %r9d +; CHECK-SSE-NEXT: movzwl {{[0-9]+}}(%rsp), %ebp +; CHECK-SSE-NEXT: xorw %bx, %bp +; 
CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %bp +; CHECK-SSE-NEXT: xorl %ebx, %ebp +; CHECK-SSE-NEXT: movzwl {{[0-9]+}}(%rsp), %ebx +; CHECK-SSE-NEXT: xorw %ax, %bx +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %bx +; CHECK-SSE-NEXT: xorl %eax, %ebx +; CHECK-SSE-NEXT: movzwl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: xorw %r14w, %ax +; CHECK-SSE-NEXT: andw {{[0-9]+}}(%rsp), %ax +; CHECK-SSE-NEXT: xorl %r14d, %eax +; CHECK-SSE-NEXT: movw %ax, 14(%rdi) +; CHECK-SSE-NEXT: movw %bx, 12(%rdi) +; CHECK-SSE-NEXT: movw %bp, 10(%rdi) +; CHECK-SSE-NEXT: movw %r9w, 8(%rdi) +; CHECK-SSE-NEXT: movw %r8w, 6(%rdi) +; CHECK-SSE-NEXT: movw %cx, 4(%rdi) +; CHECK-SSE-NEXT: movw %dx, 2(%rdi) +; CHECK-SSE-NEXT: movw %si, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: popq %rbx +; CHECK-SSE-NEXT: popq %r14 +; CHECK-SSE-NEXT: popq %rbp +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_v8i16: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %n0 = xor <8 x i16> %x, %y + %n1 = and <8 x i16> %n0, %mask + %r = xor <8 x i16> %n1, %y + ret <8 x i16> %r +} + +define <4 x i32> @in_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) nounwind { +; CHECK-BASELINE-LABEL: in_v4i32: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %r10d +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %r11d +; CHECK-BASELINE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-BASELINE-NEXT: xorl %r9d, %esi +; CHECK-BASELINE-NEXT: xorl %eax, %edx +; CHECK-BASELINE-NEXT: xorl %r11d, %ecx +; CHECK-BASELINE-NEXT: xorl %r10d, %r8d +; CHECK-BASELINE-NEXT: andl {{[0-9]+}}(%rsp), %r8d +; CHECK-BASELINE-NEXT: andl {{[0-9]+}}(%rsp), %ecx +; CHECK-BASELINE-NEXT: andl {{[0-9]+}}(%rsp), %edx +; CHECK-BASELINE-NEXT: andl {{[0-9]+}}(%rsp), %esi +; CHECK-BASELINE-NEXT: xorl %r9d, %esi +; CHECK-BASELINE-NEXT: xorl %eax, %edx +; CHECK-BASELINE-NEXT: xorl %r11d, %ecx +; CHECK-BASELINE-NEXT: xorl %r10d, 
%r8d +; CHECK-BASELINE-NEXT: movl %r8d, 12(%rdi) +; CHECK-BASELINE-NEXT: movl %ecx, 8(%rdi) +; CHECK-BASELINE-NEXT: movl %edx, 4(%rdi) +; CHECK-BASELINE-NEXT: movl %esi, (%rdi) +; CHECK-BASELINE-NEXT: movq %rdi, %rax +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: in_v4i32: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl {{[0-9]+}}(%rsp), %eax +; CHECK-SSE-NEXT: movl %eax, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl %r9d, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl %r8d, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl %ecx, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl %edx, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movl %esi, -{{[0-9]+}}(%rsp) +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] +; CHECK-SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0] +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero 
+; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1] +; CHECK-SSE-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm1[0] +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; CHECK-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: movss {{.*#+}} xmm4 = mem[0],zero,zero,zero +; CHECK-SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1] +; CHECK-SSE-NEXT: movlhps {{.*#+}} xmm4 = xmm4[0],xmm1[0] +; CHECK-SSE-NEXT: xorps %xmm3, %xmm4 +; CHECK-SSE-NEXT: andps %xmm2, %xmm4 +; CHECK-SSE-NEXT: xorps %xmm3, %xmm4 +; CHECK-SSE-NEXT: movaps %xmm4, (%rdi) +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: retq +; +; CHECK-SSE2-LABEL: in_v4i32: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %n0 = xor <4 x i32> %x, %y + %n1 = and <4 x i32> %n0, %mask + %r = xor <4 x i32> %n1, %y + ret <4 x i32> %r +} + +define <2 x i64> @in_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %mask) nounwind { +; CHECK-BASELINE-LABEL: in_v2i64: +; CHECK-BASELINE: # %bb.0: +; CHECK-BASELINE-NEXT: xorq %rdx, %rdi +; CHECK-BASELINE-NEXT: xorq %rcx, %rsi +; CHECK-BASELINE-NEXT: andq %r9, %rsi +; CHECK-BASELINE-NEXT: andq %r8, %rdi +; CHECK-BASELINE-NEXT: xorq %rdx, %rdi +; CHECK-BASELINE-NEXT: xorq %rcx, %rsi +; CHECK-BASELINE-NEXT: movq %rdi, %rax +; CHECK-BASELINE-NEXT: movq %rsi, %rdx +; CHECK-BASELINE-NEXT: retq +; +; CHECK-SSE-LABEL: in_v2i64: +; CHECK-SSE: # %bb.0: +; CHECK-SSE-NEXT: xorq %rdx, %rdi +; CHECK-SSE-NEXT: xorq %rcx, %rsi +; CHECK-SSE-NEXT: andq %r9, %rsi +; CHECK-SSE-NEXT: andq %r8, %rdi +; CHECK-SSE-NEXT: xorq %rdx, %rdi +; CHECK-SSE-NEXT: xorq %rcx, %rsi +; CHECK-SSE-NEXT: movq %rdi, %rax +; CHECK-SSE-NEXT: movq %rsi, %rdx +; CHECK-SSE-NEXT: retq +; +; 
CHECK-SSE2-LABEL: in_v2i64: +; CHECK-SSE2: # %bb.0: +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: andps %xmm2, %xmm0 +; CHECK-SSE2-NEXT: xorps %xmm1, %xmm0 +; CHECK-SSE2-NEXT: retq + %n0 = xor <2 x i64> %x, %y + %n1 = and <2 x i64> %n0, %mask + %r = xor <2 x i64> %n1, %y + ret <2 x i64> %r +}