Index: lib/CodeGen/SelectionDAG/TargetLowering.cpp =================================================================== --- lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -1779,6 +1779,17 @@ Depth + 1)) return true; + // Attempt to avoid multi-use ops if we don't need anything from them. + if (!DemandedSrcBits.isAllOnesValue() || + !DemandedSrcElts.isAllOnesValue()) { + if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits( + Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) { + SDValue NewOp = + TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc, Idx); + return TLO.CombineTo(Op, NewOp); + } + } + Known = Known2; if (BitWidth > EltBitWidth) Known = Known.zext(BitWidth, false /* => any extend */); Index: test/CodeGen/AArch64/vecreduce-and-legalization.ll =================================================================== --- test/CodeGen/AArch64/vecreduce-and-legalization.ll +++ test/CodeGen/AArch64/vecreduce-and-legalization.ll @@ -105,17 +105,17 @@ ; CHECK-NEXT: mov v0.b[14], w8 ; CHECK-NEXT: mov v0.b[15], w8 ; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: and v0.8b, v0.8b, v1.8b -; CHECK-NEXT: umov w8, v0.b[1] -; CHECK-NEXT: umov w9, v0.b[0] +; CHECK-NEXT: and v1.8b, v0.8b, v1.8b +; CHECK-NEXT: umov w8, v1.b[1] +; CHECK-NEXT: umov w9, v1.b[0] ; CHECK-NEXT: and w8, w9, w8 -; CHECK-NEXT: umov w9, v0.b[2] +; CHECK-NEXT: umov w9, v1.b[2] ; CHECK-NEXT: and w8, w8, w9 -; CHECK-NEXT: umov w9, v0.b[3] +; CHECK-NEXT: umov w9, v1.b[3] ; CHECK-NEXT: and w8, w8, w9 -; CHECK-NEXT: umov w9, v0.b[4] +; CHECK-NEXT: umov w9, v1.b[4] ; CHECK-NEXT: and w8, w8, w9 -; CHECK-NEXT: umov w9, v0.b[5] +; CHECK-NEXT: umov w9, v1.b[5] ; CHECK-NEXT: and w8, w8, w9 ; CHECK-NEXT: umov w9, v0.b[6] ; CHECK-NEXT: and w8, w8, w9 @@ -132,9 +132,9 @@ ; CHECK-NEXT: mov w8, #-1 ; CHECK-NEXT: mov v0.s[3], w8 ; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: and v0.8b, v0.8b, v1.8b +; CHECK-NEXT: and v1.8b, v0.8b, v1.8b ; CHECK-NEXT: mov w8, v0.s[1] -; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov w9, s1 ; CHECK-NEXT: and w0, w9, w8 ; CHECK-NEXT: ret %b = call i32 @llvm.experimental.vector.reduce.and.v3i32(<3 x i32> %a) Index: test/CodeGen/ARM/dagcombine-anyexttozeroext.ll =================================================================== --- test/CodeGen/ARM/dagcombine-anyexttozeroext.ll +++ test/CodeGen/ARM/dagcombine-anyexttozeroext.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple armv7 %s -o - | FileCheck %s define float @f(<4 x i16>* nocapture %in) { @@ -63,8 +64,8 @@ ret <4 x i32> %13 } +; FIXME: The vmov.u + sxt can convert to a vmov.s define float @i(<4 x i16>* nocapture %in) { - ; FIXME: The vmov.u + sxt can convert to a vmov.s ; CHECK-LABEL: i: ; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] @@ -95,8 +96,8 @@ ret float %3 } -define float @k(<8 x i8>* nocapture %in) { ; FIXME: The vmov.u + sxt can convert to a vmov.s +define float @k(<8 x i8>* nocapture %in) { ; CHECK-LABEL: k: ; CHECK: @ %bb.0: ; CHECK-NEXT: vldr d16, [r0] @@ -113,17 +114,14 @@ } define float @KnownUpperZero(<4 x i16> %v) { -; FIXME: uxtb are not required ; CHECK-LABEL: KnownUpperZero: ; CHECK: @ %bb.0: -; CHECK-NEXT: vmov.i16 d16, #0x3 -; CHECK-NEXT: vmov d17, r0, r1 -; CHECK-NEXT: vand d16, d17, d16 +; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vmov.u16 r0, d16[0] ; CHECK-NEXT: vmov.u16 r1, d16[3] -; CHECK-NEXT: uxtb r0, r0 +; CHECK-NEXT: and r0, r0, #3 ; CHECK-NEXT: vmov s0, r0 -; CHECK-NEXT: uxtb 
r0, r1 +; CHECK-NEXT: and r0, r1, #3 ; CHECK-NEXT: vmov s2, r0 ; CHECK-NEXT: vcvt.f32.s32 s0, s0 ; CHECK-NEXT: vcvt.f32.s32 s2, s2 Index: test/CodeGen/WebAssembly/simd-arith.ll =================================================================== --- test/CodeGen/WebAssembly/simd-arith.ll +++ test/CodeGen/WebAssembly/simd-arith.ll @@ -90,18 +90,18 @@ ; NO-SIMD128-NOT: i8x16 ; SIMD128-NEXT: .functype shl_vec_v16i8 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: i8x16.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}} +; SIMD128-NEXT: i8x16.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}} ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}} -; SIMD128-NEXT: i8x16.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}} -; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}} -; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}} -; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}} -; SIMD128-NEXT: i32.shl $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}} -; SIMD128-NEXT: i8x16.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}} +; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}} +; SIMD128-NEXT: i32.shl $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]] +; SIMD128-NEXT: i8x16.splat $push[[M3:[0-9]+]]=, $pop[[M2]] ; Skip 14 lanes ; SIMD128: i8x16.extract_lane_s $push[[L4:[0-9]+]]=, $0, 15{{$}} -; SIMD128-NEXT: i8x16.extract_lane_u $push[[L5:[0-9]+]]=, $1, 15{{$}} -; SIMD128-NEXT: i32.shl $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}} -; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 15, $pop[[L6]]{{$}} +; SIMD128-NEXT: i8x16.extract_lane_s $push[[L5:[0-9]+]]=, $1, 15{{$}} +; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 7{{$}} +; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}} +; SIMD128-NEXT: i32.shl $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}} +; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 15, $pop[[M6]]{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} define <16 x i8> @shl_vec_v16i8(<16 x i8> %v, <16 x i8> %x) { %a = shl <16 x i8> %v, %x @@ -126,18 +126,18 @@ ; NO-SIMD128-NOT: i8x16 ; SIMD128-NEXT: .functype shr_s_vec_v16i8 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: i8x16.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}} +; SIMD128-NEXT: i8x16.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}} ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}} -; SIMD128-NEXT: i8x16.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}} -; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}} -; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}} -; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}} -; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}} -; SIMD128-NEXT: i8x16.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}} +; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}} +; SIMD128-NEXT: i32.shr_s $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]] +; SIMD128-NEXT: i8x16.splat $push[[M3:[0-9]+]]=, $pop[[M2]] ; Skip 14 lanes -; SIMD128: i8x16.extract_lane_s $push[[L0:[0-9]+]]=, $0, 15{{$}} -; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $1, 15{{$}} -; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}} -; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop{{[0-9]+}}, 15, $pop[[L2]]{{$}} +; SIMD128: i8x16.extract_lane_s $push[[L4:[0-9]+]]=, $0, 15{{$}} +; SIMD128-NEXT: i8x16.extract_lane_s $push[[L5:[0-9]+]]=, $1, 15{{$}} +; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 7{{$}} +; SIMD128-NEXT: i32.and 
$push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}} +; SIMD128-NEXT: i32.shr_s $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}} +; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 15, $pop[[M6]]{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} define <16 x i8> @shr_s_vec_v16i8(<16 x i8> %v, <16 x i8> %x) { %a = ashr <16 x i8> %v, %x @@ -162,18 +162,18 @@ ; NO-SIMD128-NOT: i8x16 ; SIMD128-NEXT: .functype shr_u_vec_v16i8 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: i8x16.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}} +; SIMD128-NEXT: i8x16.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}} ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}} -; SIMD128-NEXT: i8x16.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}} -; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}} -; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}} -; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}} -; SIMD128-NEXT: i32.shr_u $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}} -; SIMD128-NEXT: i8x16.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}} +; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}} +; SIMD128-NEXT: i32.shr_u $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]] +; SIMD128-NEXT: i8x16.splat $push[[M3:[0-9]+]]=, $pop[[M2]] ; Skip 14 lanes ; SIMD128: i8x16.extract_lane_u $push[[L4:[0-9]+]]=, $0, 15{{$}} -; SIMD128-NEXT: i8x16.extract_lane_u $push[[L5:[0-9]+]]=, $1, 15{{$}} -; SIMD128-NEXT: i32.shr_u $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}} -; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 15, $pop[[L6]]{{$}} +; SIMD128-NEXT: i8x16.extract_lane_s $push[[L5:[0-9]+]]=, $1, 15{{$}} +; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 7{{$}} +; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}} +; SIMD128-NEXT: i32.shr_u $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}} +; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 15, $pop[[M6]]{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} define <16 x i8> @shr_u_vec_v16i8(<16 x i8> %v, <16 x i8> %x) { %a = lshr <16 x i8> %v, %x @@ -316,18 +316,18 @@ ; NO-SIMD128-NOT: i16x8 ; SIMD128-NEXT: .functype shl_vec_v8i16 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: i16x8.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}} +; SIMD128-NEXT: i16x8.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}} ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}} -; SIMD128-NEXT: i16x8.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}} -; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}} -; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}} -; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}} -; SIMD128-NEXT: i32.shl $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}} -; SIMD128-NEXT: i16x8.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}} +; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}} +; SIMD128-NEXT: i32.shl $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}} +; SIMD128-NEXT: i16x8.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}} ; Skip 6 lanes ; SIMD128: i16x8.extract_lane_s $push[[L4:[0-9]+]]=, $0, 7{{$}} -; SIMD128-NEXT: i16x8.extract_lane_u $push[[L5:[0-9]+]]=, $1, 7{{$}} -; SIMD128-NEXT: i32.shl $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}} -; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 7, $pop[[L6]]{{$}} +; SIMD128-NEXT: i16x8.extract_lane_s $push[[L5:[0-9]+]]=, $1, 7{{$}} +; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 15{{$}} +; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], 
$pop[[M4]]{{$}} +; SIMD128-NEXT: i32.shl $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}} +; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 7, $pop[[M6]]{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} define <8 x i16> @shl_vec_v8i16(<8 x i16> %v, <8 x i16> %x) { %a = shl <8 x i16> %v, %x @@ -351,18 +351,18 @@ ; NO-SIMD128-NOT: i16x8 ; SIMD128-NEXT: .functype shr_s_vec_v8i16 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: i16x8.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}} +; SIMD128-NEXT: i16x8.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}} ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}} -; SIMD128-NEXT: i16x8.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}} -; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}} -; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}} -; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}} -; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}} -; SIMD128-NEXT: i16x8.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}} +; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}} +; SIMD128-NEXT: i32.shr_s $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}} +; SIMD128-NEXT: i16x8.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}} ; Skip 6 lanes -; SIMD128: i16x8.extract_lane_s $push[[L0:[0-9]+]]=, $0, 7{{$}} -; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $1, 7{{$}} -; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}} -; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop{{[0-9]+}}, 7, $pop[[L2]]{{$}} +; SIMD128: i16x8.extract_lane_s $push[[L4:[0-9]+]]=, $0, 7{{$}} +; SIMD128-NEXT: i16x8.extract_lane_s $push[[L5:[0-9]+]]=, $1, 7{{$}} +; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 15{{$}} +; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}} +; SIMD128-NEXT: i32.shr_s $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}} +; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 7, $pop[[M6]]{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} define <8 x i16> @shr_s_vec_v8i16(<8 x i16> %v, <8 x i16> %x) { %a = ashr <8 x i16> %v, %x @@ -386,18 +386,18 @@ ; NO-SIMD128-NOT: i16x8 ; SIMD128-NEXT: .functype shr_u_vec_v8i16 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: i16x8.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}} +; SIMD128-NEXT: i16x8.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}} ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}} -; SIMD128-NEXT: i16x8.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}} -; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}} -; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}} -; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}} -; SIMD128-NEXT: i32.shr_u $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}} -; SIMD128-NEXT: i16x8.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}} +; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}} +; SIMD128-NEXT: i32.shr_u $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}} +; SIMD128-NEXT: i16x8.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}} ; Skip 6 lanes ; SIMD128: i16x8.extract_lane_u $push[[L4:[0-9]+]]=, $0, 7{{$}} -; SIMD128-NEXT: i16x8.extract_lane_u $push[[L5:[0-9]+]]=, $1, 7{{$}} -; SIMD128-NEXT: i32.shr_u $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}} -; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 7, $pop[[L6]]{{$}} +; SIMD128-NEXT: i16x8.extract_lane_s $push[[L5:[0-9]+]]=, $1, 7{{$}} +; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 15{{$}} +; SIMD128-NEXT: i32.and 
$push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}} +; SIMD128-NEXT: i32.shr_u $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}} +; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 7, $pop[[M6]]{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} define <8 x i16> @shr_u_vec_v8i16(<8 x i16> %v, <8 x i16> %x) { %a = lshr <8 x i16> %v, %x Index: test/CodeGen/WebAssembly/simd-extended-extract.ll =================================================================== --- test/CodeGen/WebAssembly/simd-extended-extract.ll +++ test/CodeGen/WebAssembly/simd-extended-extract.ll @@ -21,16 +21,6 @@ ; CHECK: .functype foo (i32) -> () ; CHECK-NEXT: i32.load8_u 0 ; CHECK-NEXT: i32x4.splat -; CHECK-NEXT: i32.load8_u 1 -; CHECK-NEXT: i32x4.replace_lane 1 -; CHECK-NEXT: i32.const 2 -; CHECK-NEXT: i32.add -; CHECK-NEXT: i32.load8_u 0 -; CHECK-NEXT: i32x4.replace_lane 2 -; CHECK-NEXT: i32.const 3 -; CHECK-NEXT: i32.add -; CHECK-NEXT: i32.load8_u 0 -; CHECK-NEXT: i32x4.replace_lane 3 ; CHECK-NEXT: local.tee ; CHECK-NEXT: i8x16.extract_lane_s 0 ; CHECK-NEXT: f64.convert_i32_s @@ -40,6 +30,9 @@ ; CHECK-NEXT: f64.add ; CHECK-NEXT: f32.demote_f64 ; CHECK-NEXT: f32x4.splat +; CHECK-NEXT: i32.load8_u 1 +; CHECK-NEXT: i32x4.replace_lane 1 +; CHECK-NEXT: local.tee ; CHECK-NEXT: i8x16.extract_lane_s 4 ; CHECK-NEXT: f64.convert_i32_s ; CHECK-NEXT: f64.const 0x0p0 @@ -48,6 +41,11 @@ ; CHECK-NEXT: f64.add ; CHECK-NEXT: f32.demote_f64 ; CHECK-NEXT: f32x4.replace_lane 1 +; CHECK-NEXT: i32.const 2 +; CHECK-NEXT: i32.add +; CHECK-NEXT: i32.load8_u 0 +; CHECK-NEXT: i32x4.replace_lane 2 +; CHECK-NEXT: local.tee ; CHECK-NEXT: i8x16.extract_lane_s 8 ; CHECK-NEXT: f64.convert_i32_s ; CHECK-NEXT: f64.const 0x0p0 @@ -56,6 +54,10 @@ ; CHECK-NEXT: f64.add ; CHECK-NEXT: f32.demote_f64 ; CHECK-NEXT: f32x4.replace_lane 2 +; CHECK-NEXT: i32.const 3 +; CHECK-NEXT: i32.add +; CHECK-NEXT: i32.load8_u 0 +; CHECK-NEXT: i32x4.replace_lane 3 ; CHECK-NEXT: i8x16.extract_lane_s 12 ; CHECK-NEXT: f64.convert_i32_s ; CHECK-NEXT: f64.const 0x0p0 Index: test/CodeGen/X86/bitcast-vector-bool.ll =================================================================== --- test/CodeGen/X86/bitcast-vector-bool.ll +++ test/CodeGen/X86/bitcast-vector-bool.ll @@ -155,22 +155,22 @@ ; SSE2: # %bb.0: ; SSE2-NEXT: pmovmskb %xmm0, %eax ; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3] ; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: pextrw $1, %xmm0, %eax ; SSE2-NEXT: addb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: # kill: def $al killed $al killed $eax ; SSE2-NEXT: retq ; ; SSSE3-LABEL: bitcast_v16i8_to_v2i8: ; SSSE3: # %bb.0: ; SSSE3-NEXT: pmovmskb %xmm0, %eax ; SSSE3-NEXT: movd %eax, %xmm0 -; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,u,u,u,u,u,u,u,1,u,u,u,u,u,u,u] ; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,u,u,u,u,u,u,u,1,u,u,u,u,u,u,u] +; SSSE3-NEXT: pextrw $4, %xmm0, %eax ; SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al +; SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSSE3-NEXT: retq ; ; AVX12-LABEL: bitcast_v16i8_to_v2i8: @@ -323,12 +323,11 @@ ; SSE2-NEXT: packsswb %xmm1, %xmm0 ; SSE2-NEXT: pmovmskb %xmm0, %eax ; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = 
xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3] ; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: pextrw $1, %xmm0, %eax ; SSE2-NEXT: addb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: # kill: def $al killed $al killed $eax ; SSE2-NEXT: retq ; ; SSSE3-LABEL: bitcast_v16i16_to_v2i8: @@ -336,10 +335,11 @@ ; SSSE3-NEXT: packsswb %xmm1, %xmm0 ; SSSE3-NEXT: pmovmskb %xmm0, %eax ; SSSE3-NEXT: movd %eax, %xmm0 -; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,u,u,u,u,u,u,u,1,u,u,u,u,u,u,u] ; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,u,u,u,u,u,u,u,1,u,u,u,u,u,u,u] +; SSSE3-NEXT: pextrw $4, %xmm0, %eax ; SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al +; SSSE3-NEXT: # kill: def $al killed $al killed $eax ; SSSE3-NEXT: retq ; ; AVX1-LABEL: bitcast_v16i16_to_v2i8: @@ -397,7 +397,6 @@ ; SSE2-SSSE3-NEXT: shll $16, %ecx ; SSE2-SSSE3-NEXT: orl %eax, %ecx ; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: pextrw $0, %xmm0, %ecx ; SSE2-SSSE3-NEXT: pextrw $1, %xmm0, %eax ; SSE2-SSSE3-NEXT: addl %ecx, %eax ; SSE2-SSSE3-NEXT: # kill: def $ax killed $ax killed $eax @@ -411,7 +410,6 @@ ; AVX1-NEXT: shll $16, %ecx ; AVX1-NEXT: orl %eax, %ecx ; AVX1-NEXT: vmovd %ecx, %xmm0 -; AVX1-NEXT: vpextrw $0, %xmm0, %ecx ; AVX1-NEXT: vpextrw $1, %xmm0, %eax ; AVX1-NEXT: addl %ecx, %eax ; AVX1-NEXT: # kill: def $ax killed $ax killed $eax @@ -420,9 +418,8 @@ ; ; AVX2-LABEL: bitcast_v32i8_to_v2i16: ; AVX2: # %bb.0: -; AVX2-NEXT: vpmovmskb %ymm0, %eax -; AVX2-NEXT: vmovd %eax, %xmm0 -; AVX2-NEXT: vpextrw $0, %xmm0, %ecx +; AVX2-NEXT: vpmovmskb %ymm0, %ecx +; AVX2-NEXT: vmovd %ecx, %xmm0 ; AVX2-NEXT: vpextrw $1, %xmm0, %eax ; AVX2-NEXT: addl %ecx, %eax ; AVX2-NEXT: # kill: def $ax killed $ax killed $eax @@ -437,8 +434,8 @@ ; AVX512-NEXT: subq $32, %rsp ; AVX512-NEXT: vpmovb2m %ymm0, %k0 ; AVX512-NEXT: kmovd %k0, (%rsp) -; AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero -; AVX512-NEXT: vpextrw $0, %xmm0, %ecx +; AVX512-NEXT: movl (%rsp), %ecx +; AVX512-NEXT: vmovd %ecx, %xmm0 ; AVX512-NEXT: vpextrw $1, %xmm0, %eax ; AVX512-NEXT: addl %ecx, %eax ; AVX512-NEXT: # kill: def $ax killed $ax killed $eax @@ -586,12 +583,11 @@ ; SSE2-NEXT: packsswb %xmm2, %xmm0 ; SSE2-NEXT: pmovmskb %xmm0, %eax ; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3] ; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: pextrw $1, %xmm0, %eax ; SSE2-NEXT: addb -{{[0-9]+}}(%rsp), %al +; SSE2-NEXT: # kill: def $al killed $al killed $eax ; SSE2-NEXT: retq ; ; SSSE3-LABEL: bitcast_v16i32_to_v2i8: @@ -601,10 +597,11 @@ ; SSSE3-NEXT: packsswb %xmm2, %xmm0 ; SSSE3-NEXT: pmovmskb %xmm0, %eax ; SSSE3-NEXT: movd %eax, %xmm0 -; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,u,u,u,u,u,u,u,1,u,u,u,u,u,u,u] ; SSSE3-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) -; SSSE3-NEXT: movb -{{[0-9]+}}(%rsp), %al +; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,u,u,u,u,u,u,u,1,u,u,u,u,u,u,u] +; SSSE3-NEXT: pextrw $4, %xmm0, %eax ; SSSE3-NEXT: addb -{{[0-9]+}}(%rsp), %al +; SSSE3-NEXT: # kill: def $al 
killed $al killed $eax ; SSSE3-NEXT: retq ; ; AVX1-LABEL: bitcast_v16i32_to_v2i8: @@ -671,7 +668,6 @@ ; SSE2-SSSE3-NEXT: shll $16, %ecx ; SSE2-SSSE3-NEXT: orl %eax, %ecx ; SSE2-SSSE3-NEXT: movd %ecx, %xmm0 -; SSE2-SSSE3-NEXT: pextrw $0, %xmm0, %ecx ; SSE2-SSSE3-NEXT: pextrw $1, %xmm0, %eax ; SSE2-SSSE3-NEXT: addl %ecx, %eax ; SSE2-SSSE3-NEXT: # kill: def $ax killed $ax killed $eax @@ -688,7 +684,6 @@ ; AVX1-NEXT: shll $16, %ecx ; AVX1-NEXT: orl %eax, %ecx ; AVX1-NEXT: vmovd %ecx, %xmm0 -; AVX1-NEXT: vpextrw $0, %xmm0, %ecx ; AVX1-NEXT: vpextrw $1, %xmm0, %eax ; AVX1-NEXT: addl %ecx, %eax ; AVX1-NEXT: # kill: def $ax killed $ax killed $eax @@ -699,9 +694,8 @@ ; AVX2: # %bb.0: ; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] -; AVX2-NEXT: vpmovmskb %ymm0, %eax -; AVX2-NEXT: vmovd %eax, %xmm0 -; AVX2-NEXT: vpextrw $0, %xmm0, %ecx +; AVX2-NEXT: vpmovmskb %ymm0, %ecx +; AVX2-NEXT: vmovd %ecx, %xmm0 ; AVX2-NEXT: vpextrw $1, %xmm0, %eax ; AVX2-NEXT: addl %ecx, %eax ; AVX2-NEXT: # kill: def $ax killed $ax killed $eax @@ -716,8 +710,8 @@ ; AVX512-NEXT: subq $32, %rsp ; AVX512-NEXT: vpmovw2m %zmm0, %k0 ; AVX512-NEXT: kmovd %k0, (%rsp) -; AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero -; AVX512-NEXT: vpextrw $0, %xmm0, %ecx +; AVX512-NEXT: movl (%rsp), %ecx +; AVX512-NEXT: vmovd %ecx, %xmm0 ; AVX512-NEXT: vpextrw $1, %xmm0, %eax ; AVX512-NEXT: addl %ecx, %eax ; AVX512-NEXT: # kill: def $ax killed $ax killed $eax Index: test/CodeGen/X86/promote-vec3.ll =================================================================== --- test/CodeGen/X86/promote-vec3.ll +++ test/CodeGen/X86/promote-vec3.ll @@ -22,10 +22,10 @@ ; SSE41: # %bb.0: ; SSE41-NEXT: pxor %xmm0, %xmm0 ; SSE41-NEXT: pinsrb $0, {{[0-9]+}}(%esp), %xmm0 +; SSE41-NEXT: movd %xmm0, %eax ; SSE41-NEXT: pinsrb $4, {{[0-9]+}}(%esp), %xmm0 ; SSE41-NEXT: pextrw $2, %xmm0, %edx ; SSE41-NEXT: pinsrb $8, {{[0-9]+}}(%esp), %xmm0 -; SSE41-NEXT: movd %xmm0, %eax ; SSE41-NEXT: pextrw $4, %xmm0, %ecx ; SSE41-NEXT: # kill: def $ax killed $ax killed $eax ; SSE41-NEXT: # kill: def $dx killed $dx killed $edx @@ -36,11 +36,11 @@ ; AVX-32: # %bb.0: ; AVX-32-NEXT: vpxor %xmm0, %xmm0, %xmm0 ; AVX-32-NEXT: vpinsrb $0, {{[0-9]+}}(%esp), %xmm0, %xmm0 -; AVX-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm0 -; AVX-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm0, %xmm1 -; AVX-32-NEXT: vpextrw $2, %xmm0, %edx -; AVX-32-NEXT: vmovd %xmm1, %eax -; AVX-32-NEXT: vpextrw $4, %xmm1, %ecx +; AVX-32-NEXT: vpinsrb $4, {{[0-9]+}}(%esp), %xmm0, %xmm1 +; AVX-32-NEXT: vpinsrb $8, {{[0-9]+}}(%esp), %xmm1, %xmm2 +; AVX-32-NEXT: vmovd %xmm0, %eax +; AVX-32-NEXT: vpextrw $2, %xmm1, %edx +; AVX-32-NEXT: vpextrw $4, %xmm2, %ecx ; AVX-32-NEXT: # kill: def $ax killed $ax killed $eax ; AVX-32-NEXT: # kill: def $dx killed $dx killed $edx ; AVX-32-NEXT: # kill: def $cx killed $cx killed $ecx Index: test/CodeGen/X86/vec_smulo.ll =================================================================== --- test/CodeGen/X86/vec_smulo.ll +++ test/CodeGen/X86/vec_smulo.ll @@ -1898,7 +1898,6 @@ ; SSE2-NEXT: psrad $8, %xmm1 ; SSE2-NEXT: pcmpeqd %xmm4, %xmm1 ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[3,1,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,3,0,1] ; SSE2-NEXT: psrad $31, %xmm4 ; SSE2-NEXT: pcmpeqd %xmm3, %xmm4 ; SSE2-NEXT: pcmpeqd %xmm3, %xmm3 @@ -1907,20 +1906,21 @@ ; SSE2-NEXT: por %xmm4, %xmm1 ; SSE2-NEXT: movd %xmm0, %eax ; SSE2-NEXT: movw %ax, (%rdi) -; SSE2-NEXT: movd %xmm2, %ecx -; SSE2-NEXT: movw %cx, 3(%rdi) +; SSE2-NEXT: pshufd {{.*#+}} xmm0 
= xmm0[2,3,0,1] +; SSE2-NEXT: movd %xmm0, %ecx +; SSE2-NEXT: movw %cx, 6(%rdi) +; SSE2-NEXT: movd %xmm2, %edx +; SSE2-NEXT: movw %dx, 3(%rdi) ; SSE2-NEXT: shrl $16, %eax ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: shrl $16, %ecx -; SSE2-NEXT: movb %cl, 5(%rdi) +; SSE2-NEXT: movb %cl, 8(%rdi) +; SSE2-NEXT: shrl $16, %edx +; SSE2-NEXT: movb %dl, 5(%rdi) ; SSE2-NEXT: movd %xmm5, %eax ; SSE2-NEXT: movw %ax, 9(%rdi) -; SSE2-NEXT: movd %xmm6, %ecx -; SSE2-NEXT: movw %cx, 6(%rdi) ; SSE2-NEXT: shrl $16, %eax ; SSE2-NEXT: movb %al, 11(%rdi) -; SSE2-NEXT: shrl $16, %ecx -; SSE2-NEXT: movb %cl, 8(%rdi) ; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: retq ; @@ -1953,7 +1953,6 @@ ; SSSE3-NEXT: psrad $8, %xmm1 ; SSSE3-NEXT: pcmpeqd %xmm4, %xmm1 ; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[3,1,2,3] -; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,3,0,1] ; SSSE3-NEXT: psrad $31, %xmm4 ; SSSE3-NEXT: pcmpeqd %xmm3, %xmm4 ; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3 @@ -1962,20 +1961,21 @@ ; SSSE3-NEXT: por %xmm4, %xmm1 ; SSSE3-NEXT: movd %xmm0, %eax ; SSSE3-NEXT: movw %ax, (%rdi) -; SSSE3-NEXT: movd %xmm2, %ecx -; SSSE3-NEXT: movw %cx, 3(%rdi) +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSSE3-NEXT: movd %xmm0, %ecx +; SSSE3-NEXT: movw %cx, 6(%rdi) +; SSSE3-NEXT: movd %xmm2, %edx +; SSSE3-NEXT: movw %dx, 3(%rdi) ; SSSE3-NEXT: shrl $16, %eax ; SSSE3-NEXT: movb %al, 2(%rdi) ; SSSE3-NEXT: shrl $16, %ecx -; SSSE3-NEXT: movb %cl, 5(%rdi) +; SSSE3-NEXT: movb %cl, 8(%rdi) +; SSSE3-NEXT: shrl $16, %edx +; SSSE3-NEXT: movb %dl, 5(%rdi) ; SSSE3-NEXT: movd %xmm5, %eax ; SSSE3-NEXT: movw %ax, 9(%rdi) -; SSSE3-NEXT: movd %xmm6, %ecx -; SSSE3-NEXT: movw %cx, 6(%rdi) ; SSSE3-NEXT: shrl $16, %eax ; SSSE3-NEXT: movb %al, 11(%rdi) -; SSSE3-NEXT: shrl $16, %ecx -; SSSE3-NEXT: movb %cl, 8(%rdi) ; SSSE3-NEXT: movdqa %xmm1, %xmm0 ; SSSE3-NEXT: retq ; Index: test/CodeGen/X86/vec_umulo.ll =================================================================== --- test/CodeGen/X86/vec_umulo.ll +++ test/CodeGen/X86/vec_umulo.ll @@ -1677,33 +1677,33 @@ ; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] ; SSE2-NEXT: pxor %xmm4, %xmm4 ; SSE2-NEXT: pcmpeqd %xmm4, %xmm3 +; SSE2-NEXT: pcmpeqd %xmm5, %xmm5 +; SSE2-NEXT: pxor %xmm5, %xmm3 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,2,2,3] -; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1] -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[3,1,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,0,1] +; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1] +; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[3,1,2,3] ; SSE2-NEXT: psrld $24, %xmm1 ; SSE2-NEXT: pcmpeqd %xmm4, %xmm1 -; SSE2-NEXT: pcmpeqd %xmm4, %xmm4 -; SSE2-NEXT: pxor %xmm4, %xmm3 -; SSE2-NEXT: pxor %xmm4, %xmm1 +; SSE2-NEXT: pxor %xmm5, %xmm1 ; SSE2-NEXT: por %xmm3, %xmm1 ; SSE2-NEXT: movd %xmm0, %eax ; SSE2-NEXT: movw %ax, (%rdi) -; SSE2-NEXT: movd %xmm2, %ecx -; SSE2-NEXT: movw %cx, 3(%rdi) +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSE2-NEXT: movd %xmm0, %ecx +; SSE2-NEXT: movw %cx, 6(%rdi) +; SSE2-NEXT: movd %xmm2, %edx +; SSE2-NEXT: movw %dx, 3(%rdi) ; SSE2-NEXT: shrl $16, %eax ; SSE2-NEXT: movb %al, 2(%rdi) ; SSE2-NEXT: shrl $16, %ecx -; SSE2-NEXT: movb %cl, 5(%rdi) -; SSE2-NEXT: movd %xmm5, %eax +; SSE2-NEXT: movb %cl, 8(%rdi) +; SSE2-NEXT: shrl $16, %edx +; SSE2-NEXT: movb %dl, 5(%rdi) +; SSE2-NEXT: movd %xmm6, %eax ; SSE2-NEXT: movw %ax, 9(%rdi) -; SSE2-NEXT: movd %xmm6, %ecx -; 
SSE2-NEXT: movw %cx, 6(%rdi) ; SSE2-NEXT: shrl $16, %eax ; SSE2-NEXT: movb %al, 11(%rdi) -; SSE2-NEXT: shrl $16, %ecx -; SSE2-NEXT: movb %cl, 8(%rdi) ; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: retq ; @@ -1721,33 +1721,33 @@ ; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] ; SSSE3-NEXT: pxor %xmm4, %xmm4 ; SSSE3-NEXT: pcmpeqd %xmm4, %xmm3 +; SSSE3-NEXT: pcmpeqd %xmm5, %xmm5 +; SSSE3-NEXT: pxor %xmm5, %xmm3 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3] -; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,2,2,3] -; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1] -; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm1[3,1,2,3] -; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,0,1] +; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm2[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1] +; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm1[3,1,2,3] ; SSSE3-NEXT: psrld $24, %xmm1 ; SSSE3-NEXT: pcmpeqd %xmm4, %xmm1 -; SSSE3-NEXT: pcmpeqd %xmm4, %xmm4 -; SSSE3-NEXT: pxor %xmm4, %xmm3 -; SSSE3-NEXT: pxor %xmm4, %xmm1 +; SSSE3-NEXT: pxor %xmm5, %xmm1 ; SSSE3-NEXT: por %xmm3, %xmm1 ; SSSE3-NEXT: movd %xmm0, %eax ; SSSE3-NEXT: movw %ax, (%rdi) -; SSSE3-NEXT: movd %xmm2, %ecx -; SSSE3-NEXT: movw %cx, 3(%rdi) +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; SSSE3-NEXT: movd %xmm0, %ecx +; SSSE3-NEXT: movw %cx, 6(%rdi) +; SSSE3-NEXT: movd %xmm2, %edx +; SSSE3-NEXT: movw %dx, 3(%rdi) ; SSSE3-NEXT: shrl $16, %eax ; SSSE3-NEXT: movb %al, 2(%rdi) ; SSSE3-NEXT: shrl $16, %ecx -; SSSE3-NEXT: movb %cl, 5(%rdi) -; SSSE3-NEXT: movd %xmm5, %eax +; SSSE3-NEXT: movb %cl, 8(%rdi) +; SSSE3-NEXT: shrl $16, %edx +; SSSE3-NEXT: movb %dl, 5(%rdi) +; SSSE3-NEXT: movd %xmm6, %eax ; SSSE3-NEXT: movw %ax, 9(%rdi) -; SSSE3-NEXT: movd %xmm6, %ecx -; SSSE3-NEXT: movw %cx, 6(%rdi) ; SSSE3-NEXT: shrl $16, %eax ; SSSE3-NEXT: movb %al, 11(%rdi) -; SSSE3-NEXT: shrl $16, %ecx -; SSSE3-NEXT: movb %cl, 8(%rdi) ; SSSE3-NEXT: movdqa %xmm1, %xmm0 ; SSSE3-NEXT: retq ; Index: test/CodeGen/X86/vector-reduce-mul-widen.ll =================================================================== --- test/CodeGen/X86/vector-reduce-mul-widen.ll +++ test/CodeGen/X86/vector-reduce-mul-widen.ll @@ -1739,14 +1739,11 @@ ; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE2-NEXT: pmullw %xmm2, %xmm0 -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: pand %xmm1, %xmm2 -; SSE2-NEXT: packuswb %xmm3, %xmm2 -; SSE2-NEXT: psrlw $8, %xmm2 -; SSE2-NEXT: pmullw %xmm0, %xmm2 -; SSE2-NEXT: pand %xmm1, %xmm2 -; SSE2-NEXT: packuswb %xmm0, %xmm2 -; SSE2-NEXT: movd %xmm2, %eax +; SSE2-NEXT: pand %xmm0, %xmm1 +; SSE2-NEXT: packuswb %xmm3, %xmm1 +; SSE2-NEXT: psrlw $8, %xmm1 +; SSE2-NEXT: pmullw %xmm0, %xmm1 +; SSE2-NEXT: movd %xmm1, %eax ; SSE2-NEXT: # kill: def $al killed $al killed $eax ; SSE2-NEXT: retq ; @@ -1954,14 +1951,11 @@ ; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE2-NEXT: pmullw %xmm2, %xmm0 -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: pand %xmm1, %xmm2 -; SSE2-NEXT: packuswb %xmm3, %xmm2 -; SSE2-NEXT: psrlw $8, %xmm2 -; SSE2-NEXT: pmullw %xmm0, %xmm2 -; SSE2-NEXT: pand %xmm1, %xmm2 -; SSE2-NEXT: packuswb %xmm0, %xmm2 -; SSE2-NEXT: movd %xmm2, %eax +; SSE2-NEXT: pand %xmm0, %xmm1 +; SSE2-NEXT: packuswb %xmm3, %xmm1 +; SSE2-NEXT: psrlw $8, %xmm1 +; SSE2-NEXT: pmullw %xmm0, %xmm1 +; 
SSE2-NEXT: movd %xmm1, %eax ; SSE2-NEXT: # kill: def $al killed $al killed $eax ; SSE2-NEXT: retq ; @@ -2223,14 +2217,11 @@ ; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE2-NEXT: pmullw %xmm2, %xmm0 -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: pand %xmm1, %xmm2 -; SSE2-NEXT: packuswb %xmm3, %xmm2 -; SSE2-NEXT: psrlw $8, %xmm2 -; SSE2-NEXT: pmullw %xmm0, %xmm2 -; SSE2-NEXT: pand %xmm1, %xmm2 -; SSE2-NEXT: packuswb %xmm0, %xmm2 -; SSE2-NEXT: movd %xmm2, %eax +; SSE2-NEXT: pand %xmm0, %xmm1 +; SSE2-NEXT: packuswb %xmm3, %xmm1 +; SSE2-NEXT: psrlw $8, %xmm1 +; SSE2-NEXT: pmullw %xmm0, %xmm1 +; SSE2-NEXT: movd %xmm1, %eax ; SSE2-NEXT: # kill: def $al killed $al killed $eax ; SSE2-NEXT: retq ; @@ -2595,14 +2586,11 @@ ; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; SSE2-NEXT: pmullw %xmm2, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: packuswb %xmm3, %xmm2 -; SSE2-NEXT: psrlw $8, %xmm2 -; SSE2-NEXT: pmullw %xmm1, %xmm2 -; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: packuswb %xmm0, %xmm2 -; SSE2-NEXT: movd %xmm2, %eax +; SSE2-NEXT: pand %xmm1, %xmm0 +; SSE2-NEXT: packuswb %xmm3, %xmm0 +; SSE2-NEXT: psrlw $8, %xmm0 +; SSE2-NEXT: pmullw %xmm1, %xmm0 +; SSE2-NEXT: movd %xmm0, %eax ; SSE2-NEXT: # kill: def $al killed $al killed $eax ; SSE2-NEXT: retq ; Index: test/CodeGen/X86/vector-reduce-mul.ll =================================================================== --- test/CodeGen/X86/vector-reduce-mul.ll +++ test/CodeGen/X86/vector-reduce-mul.ll @@ -1688,14 +1688,11 @@ ; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE2-NEXT: pmullw %xmm2, %xmm0 -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: pand %xmm1, %xmm2 -; SSE2-NEXT: packuswb %xmm3, %xmm2 -; SSE2-NEXT: psrlw $8, %xmm2 -; SSE2-NEXT: pmullw %xmm0, %xmm2 -; SSE2-NEXT: pand %xmm1, %xmm2 -; SSE2-NEXT: packuswb %xmm0, %xmm2 -; SSE2-NEXT: movd %xmm2, %eax +; SSE2-NEXT: pand %xmm0, %xmm1 +; SSE2-NEXT: packuswb %xmm3, %xmm1 +; SSE2-NEXT: psrlw $8, %xmm1 +; SSE2-NEXT: pmullw %xmm0, %xmm1 +; SSE2-NEXT: movd %xmm1, %eax ; SSE2-NEXT: # kill: def $al killed $al killed $eax ; SSE2-NEXT: retq ; @@ -1903,14 +1900,11 @@ ; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE2-NEXT: pmullw %xmm2, %xmm0 -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: pand %xmm1, %xmm2 -; SSE2-NEXT: packuswb %xmm3, %xmm2 -; SSE2-NEXT: psrlw $8, %xmm2 -; SSE2-NEXT: pmullw %xmm0, %xmm2 -; SSE2-NEXT: pand %xmm1, %xmm2 -; SSE2-NEXT: packuswb %xmm0, %xmm2 -; SSE2-NEXT: movd %xmm2, %eax +; SSE2-NEXT: pand %xmm0, %xmm1 +; SSE2-NEXT: packuswb %xmm3, %xmm1 +; SSE2-NEXT: psrlw $8, %xmm1 +; SSE2-NEXT: pmullw %xmm0, %xmm1 +; SSE2-NEXT: movd %xmm1, %eax ; SSE2-NEXT: # kill: def $al killed $al killed $eax ; SSE2-NEXT: retq ; @@ -2172,14 +2166,11 @@ ; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE2-NEXT: pmullw %xmm2, %xmm0 -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: pand %xmm1, %xmm2 
-; SSE2-NEXT: packuswb %xmm3, %xmm2 -; SSE2-NEXT: psrlw $8, %xmm2 -; SSE2-NEXT: pmullw %xmm0, %xmm2 -; SSE2-NEXT: pand %xmm1, %xmm2 -; SSE2-NEXT: packuswb %xmm0, %xmm2 -; SSE2-NEXT: movd %xmm2, %eax +; SSE2-NEXT: pand %xmm0, %xmm1 +; SSE2-NEXT: packuswb %xmm3, %xmm1 +; SSE2-NEXT: psrlw $8, %xmm1 +; SSE2-NEXT: pmullw %xmm0, %xmm1 +; SSE2-NEXT: movd %xmm1, %eax ; SSE2-NEXT: # kill: def $al killed $al killed $eax ; SSE2-NEXT: retq ; @@ -2544,14 +2535,11 @@ ; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; SSE2-NEXT: pmullw %xmm2, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: packuswb %xmm3, %xmm2 -; SSE2-NEXT: psrlw $8, %xmm2 -; SSE2-NEXT: pmullw %xmm1, %xmm2 -; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: packuswb %xmm0, %xmm2 -; SSE2-NEXT: movd %xmm2, %eax +; SSE2-NEXT: pand %xmm1, %xmm0 +; SSE2-NEXT: packuswb %xmm3, %xmm0 +; SSE2-NEXT: psrlw $8, %xmm0 +; SSE2-NEXT: pmullw %xmm1, %xmm0 +; SSE2-NEXT: movd %xmm0, %eax ; SSE2-NEXT: # kill: def $al killed $al killed $eax ; SSE2-NEXT: retq ; Index: test/CodeGen/X86/xor.ll =================================================================== --- test/CodeGen/X86/xor.ll +++ test/CodeGen/X86/xor.ll @@ -407,13 +407,10 @@ ; X32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; X32-NEXT: pandn {{\.LCPI.*}}, %xmm0 -; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3] -; X32-NEXT: movd %xmm1, %ecx ; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; X32-NEXT: movd %xmm0, %edx -; X32-NEXT: xorl $1, %edx +; X32-NEXT: movd %xmm0, %ecx ; X32-NEXT: xorl %eax, %eax -; X32-NEXT: orl %ecx, %edx +; X32-NEXT: cmpl $1, %ecx ; X32-NEXT: setne %al ; X32-NEXT: retl ;
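
For context on the C++ change above: when the source of an EXTRACT_VECTOR_ELT has multiple uses, SimplifyDemandedBits cannot rewrite that source in place, but with this patch it can build the extract on a simplified copy of the source returned by SimplifyMultipleUseDemandedBits. A minimal hypothetical sketch of the kind of pattern this targets (the function name and mask constants are illustrative assumptions, not taken from the tests in this diff):

; The AND has a second use (the store), so it cannot be rewritten in
; place; but the extract only demands lane 0, and the mask for lane 0
; is all-ones, so the extract can now look through the AND and read
; %v directly.
define i32 @extract_through_multiuse_and(<4 x i32> %v, <4 x i32>* %p) {
  %m = and <4 x i32> %v, <i32 -1, i32 0, i32 0, i32 0>
  store <4 x i32> %m, <4 x i32>* %p
  %e = extractelement <4 x i32> %m, i32 0
  ret i32 %e
}

The register churn in the updated checks above (e.g. reads switching between %xmm2/%xmm1 or v0/v1) is the visible effect of the same fold: extracts that previously had to read the multi-use intermediate value can now read a simpler source when the demanded lane is unaffected by it.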