diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp --- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -1836,6 +1836,17 @@ Depth + 1)) return true; + // Attempt to avoid multi-use ops if we don't need anything from them. + if (!DemandedSrcBits.isAllOnesValue() || + !DemandedSrcElts.isAllOnesValue()) { + if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits( + Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) { + SDValue NewOp = + TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc, Idx); + return TLO.CombineTo(Op, NewOp); + } + } + Known = Known2; if (BitWidth > EltBitWidth) Known = Known.zext(BitWidth, false /* => any extend */); diff --git a/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll --- a/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll +++ b/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll @@ -105,17 +105,17 @@ ; CHECK-NEXT: mov v0.b[14], w8 ; CHECK-NEXT: mov v0.b[15], w8 ; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: and v0.8b, v0.8b, v1.8b -; CHECK-NEXT: umov w8, v0.b[1] -; CHECK-NEXT: umov w9, v0.b[0] +; CHECK-NEXT: and v1.8b, v0.8b, v1.8b +; CHECK-NEXT: umov w8, v1.b[1] +; CHECK-NEXT: umov w9, v1.b[0] ; CHECK-NEXT: and w8, w9, w8 -; CHECK-NEXT: umov w9, v0.b[2] +; CHECK-NEXT: umov w9, v1.b[2] ; CHECK-NEXT: and w8, w8, w9 -; CHECK-NEXT: umov w9, v0.b[3] +; CHECK-NEXT: umov w9, v1.b[3] ; CHECK-NEXT: and w8, w8, w9 -; CHECK-NEXT: umov w9, v0.b[4] +; CHECK-NEXT: umov w9, v1.b[4] ; CHECK-NEXT: and w8, w8, w9 -; CHECK-NEXT: umov w9, v0.b[5] +; CHECK-NEXT: umov w9, v1.b[5] ; CHECK-NEXT: and w8, w8, w9 ; CHECK-NEXT: umov w9, v0.b[6] ; CHECK-NEXT: and w8, w8, w9 @@ -132,9 +132,9 @@ ; CHECK-NEXT: mov w8, #-1 ; CHECK-NEXT: mov v0.s[3], w8 ; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8 -; CHECK-NEXT: and v0.8b, v0.8b, v1.8b +; CHECK-NEXT: and v1.8b, v0.8b, v1.8b ; CHECK-NEXT: mov w8, v0.s[1] -; CHECK-NEXT: fmov w9, s0 +; CHECK-NEXT: fmov w9, s1 ; CHECK-NEXT: and w0, w9, w8 ; CHECK-NEXT: ret %b = call i32 @llvm.experimental.vector.reduce.and.v3i32(<3 x i32> %a) diff --git a/llvm/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll b/llvm/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll --- a/llvm/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll +++ b/llvm/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll @@ -113,17 +113,14 @@ } define float @KnownUpperZero(<4 x i16> %v) { -; FIXME: uxtb are not required ; CHECK-LABEL: KnownUpperZero: ; CHECK: @ %bb.0: -; CHECK-NEXT: vmov.i16 d16, #0x3 -; CHECK-NEXT: vmov d17, r0, r1 -; CHECK-NEXT: vand d16, d17, d16 +; CHECK-NEXT: vmov d16, r0, r1 ; CHECK-NEXT: vmov.u16 r0, d16[0] ; CHECK-NEXT: vmov.u16 r1, d16[3] -; CHECK-NEXT: uxtb r0, r0 +; CHECK-NEXT: and r0, r0, #3 ; CHECK-NEXT: vmov s0, r0 -; CHECK-NEXT: uxtb r0, r1 +; CHECK-NEXT: and r0, r1, #3 ; CHECK-NEXT: vmov s2, r0 ; CHECK-NEXT: vcvt.f32.s32 s0, s0 ; CHECK-NEXT: vcvt.f32.s32 s2, s2 diff --git a/llvm/test/CodeGen/Thumb2/lsll0.ll b/llvm/test/CodeGen/Thumb2/lsll0.ll --- a/llvm/test/CodeGen/Thumb2/lsll0.ll +++ b/llvm/test/CodeGen/Thumb2/lsll0.ll @@ -5,18 +5,17 @@ ; CHECK-LABEL: _Z4loopPxS_iS_i: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vldrw.u32 q0, [r0] -; CHECK-NEXT: vmov r1, s2 -; CHECK-NEXT: vmov r2, s0 -; CHECK-NEXT: sxth r1, r1 +; CHECK-NEXT: vmov r2, s2 +; CHECK-NEXT: vmov r1, s0 ; CHECK-NEXT: sxth r2, r2 ; CHECK-NEXT: rsbs r1, r1, #0 ; CHECK-NEXT: rsbs r2, r2, #0 ; CHECK-NEXT: 
sxth r1, r1 ; CHECK-NEXT: sxth r2, r2 -; CHECK-NEXT: asr.w r12, r1, #31 -; CHECK-NEXT: asrs r3, r2, #31 -; CHECK-NEXT: strd r2, r3, [r0] -; CHECK-NEXT: strd r1, r12, [r0, #8] +; CHECK-NEXT: asrs r3, r1, #31 +; CHECK-NEXT: asr.w r12, r2, #31 +; CHECK-NEXT: strd r1, r3, [r0] +; CHECK-NEXT: strd r2, r12, [r0, #8] ; CHECK-NEXT: bx lr entry: %wide.load = load <2 x i64>, <2 x i64>* undef, align 8 diff --git a/llvm/test/CodeGen/Thumb2/mve-vld3.ll b/llvm/test/CodeGen/Thumb2/mve-vld3.ll --- a/llvm/test/CodeGen/Thumb2/mve-vld3.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vld3.ll @@ -7,23 +7,23 @@ ; CHECK-LABEL: vld3_v2i32: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: ldrd r2, r3, [r0, #16] -; CHECK-NEXT: vldrw.u32 q1, [r0] +; CHECK-NEXT: vldrw.u32 q0, [r0] ; CHECK-NEXT: vmov.32 q2[0], r2 -; CHECK-NEXT: vmov.f64 d0, d2 +; CHECK-NEXT: vmov.f64 d2, d0 ; CHECK-NEXT: vmov.32 q2[2], r3 -; CHECK-NEXT: vmov.f32 s12, s5 +; CHECK-NEXT: vmov.32 r0, q0[2] +; CHECK-NEXT: vmov.f32 s12, s1 +; CHECK-NEXT: vmov.f32 s6, s3 ; CHECK-NEXT: vmov.f32 s14, s8 -; CHECK-NEXT: vmov.f32 s2, s7 -; CHECK-NEXT: vmov.f32 s8, s6 -; CHECK-NEXT: vmov r3, s12 -; CHECK-NEXT: vmov r2, s14 -; CHECK-NEXT: vmov r0, s2 -; CHECK-NEXT: add r0, r2 -; CHECK-NEXT: vmov r2, s10 -; CHECK-NEXT: add r0, r2 -; CHECK-NEXT: vmov r2, s0 +; CHECK-NEXT: vmov r2, s12 +; CHECK-NEXT: vmov r12, s6 +; CHECK-NEXT: vdup.32 q1, r0 +; CHECK-NEXT: vmov r0, s14 +; CHECK-NEXT: add r0, r12 +; CHECK-NEXT: add r0, r3 +; CHECK-NEXT: vmov r3, s0 ; CHECK-NEXT: add r2, r3 -; CHECK-NEXT: vmov r3, s8 +; CHECK-NEXT: vmov r3, s4 ; CHECK-NEXT: add r2, r3 ; CHECK-NEXT: strd r2, r0, [r1] ; CHECK-NEXT: bx lr @@ -258,27 +258,27 @@ ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .pad #8 ; CHECK-NEXT: sub sp, #8 +; CHECK-NEXT: vldrh.u32 q0, [r0] ; CHECK-NEXT: ldr r2, [r0, #8] -; CHECK-NEXT: vldrh.u32 q1, [r0] ; CHECK-NEXT: mov r3, sp ; CHECK-NEXT: str r2, [sp] -; CHECK-NEXT: vldrh.u32 q2, [r3] -; CHECK-NEXT: vmov.f64 d0, d2 -; CHECK-NEXT: vmov.f32 s12, s5 -; CHECK-NEXT: vmov.f32 s2, s7 -; CHECK-NEXT: vmov.f32 s14, s8 -; CHECK-NEXT: vmov.f32 s4, s6 -; CHECK-NEXT: vmov.f32 s6, s9 -; CHECK-NEXT: vmov r0, s2 -; CHECK-NEXT: vmov r2, s14 +; CHECK-NEXT: vmov.f64 d2, d0 +; CHECK-NEXT: vmov.f32 s6, s3 +; CHECK-NEXT: vmov.f32 s8, s1 +; CHECK-NEXT: vmov.f64 d6, d1 +; CHECK-NEXT: vmov r0, s6 +; CHECK-NEXT: vldrh.u32 q1, [r3] +; CHECK-NEXT: vmov.f32 s10, s4 +; CHECK-NEXT: vmov.f32 s14, s5 +; CHECK-NEXT: vmov r2, s10 ; CHECK-NEXT: add r0, r2 -; CHECK-NEXT: vmov r2, s6 +; CHECK-NEXT: vmov r2, s14 ; CHECK-NEXT: add r0, r2 ; CHECK-NEXT: strh r0, [r1, #2] -; CHECK-NEXT: vmov r0, s0 -; CHECK-NEXT: vmov r2, s12 +; CHECK-NEXT: vmov r0, s8 +; CHECK-NEXT: vmov r2, s0 ; CHECK-NEXT: add r0, r2 -; CHECK-NEXT: vmov r2, s4 +; CHECK-NEXT: vmov r2, s12 ; CHECK-NEXT: add r0, r2 ; CHECK-NEXT: strh r0, [r1] ; CHECK-NEXT: add sp, #8 diff --git a/llvm/test/CodeGen/Thumb2/mve-vld4.ll b/llvm/test/CodeGen/Thumb2/mve-vld4.ll --- a/llvm/test/CodeGen/Thumb2/mve-vld4.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vld4.ll @@ -6,33 +6,32 @@ define void @vld4_v2i32(<8 x i32> *%src, <2 x i32> *%dst) { ; CHECK-LABEL: vld4_v2i32: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: vldrw.u32 q0, [r0] -; CHECK-NEXT: vldrw.u32 q2, [r0, #16] -; CHECK-NEXT: vmov.f32 s4, s3 -; CHECK-NEXT: vmov.f64 d6, d1 -; CHECK-NEXT: vmov.f32 s6, s11 -; CHECK-NEXT: vmov.f32 s14, s10 -; CHECK-NEXT: vmov.f32 s16, s1 -; CHECK-NEXT: vmov.f32 s2, s8 -; CHECK-NEXT: vmov.f32 s18, s9 -; CHECK-NEXT: vmov r0, s6 -; CHECK-NEXT: vmov r2, s14 
-; CHECK-NEXT: vmov r3, s2 +; CHECK-NEXT: vldrw.u32 q1, [r0, #16] +; CHECK-NEXT: vmov.f32 s8, s3 +; CHECK-NEXT: vmov.32 r3, q1[0] +; CHECK-NEXT: vmov.f32 s10, s7 +; CHECK-NEXT: vmov r2, s6 +; CHECK-NEXT: vmov.f32 s12, s1 +; CHECK-NEXT: vmov.f32 s14, s5 +; CHECK-NEXT: vdup.32 q1, r3 +; CHECK-NEXT: vmov r3, s6 +; CHECK-NEXT: vmov r0, s10 ; CHECK-NEXT: add r0, r2 -; CHECK-NEXT: vmov r2, s18 +; CHECK-NEXT: vmov r2, s14 ; CHECK-NEXT: add r2, r3 -; CHECK-NEXT: vmov r3, s12 -; CHECK-NEXT: add.w r12, r2, r0 +; CHECK-NEXT: vmov r3, s0 +; CHECK-NEXT: add r0, r2 +; CHECK-NEXT: str r0, [r1, #4] +; CHECK-NEXT: vmov.32 r2, q0[2] +; CHECK-NEXT: vmov r0, s8 +; CHECK-NEXT: vdup.32 q1, r2 ; CHECK-NEXT: vmov r2, s4 -; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: add r0, r2 +; CHECK-NEXT: vmov r2, s12 ; CHECK-NEXT: add r2, r3 -; CHECK-NEXT: vmov r3, s16 -; CHECK-NEXT: add r0, r3 ; CHECK-NEXT: add r0, r2 -; CHECK-NEXT: strd r0, r12, [r1] -; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: str r0, [r1] ; CHECK-NEXT: bx lr entry: %l1 = load <8 x i32>, <8 x i32>* %src, align 4 diff --git a/llvm/test/CodeGen/WebAssembly/simd-arith.ll b/llvm/test/CodeGen/WebAssembly/simd-arith.ll --- a/llvm/test/CodeGen/WebAssembly/simd-arith.ll +++ b/llvm/test/CodeGen/WebAssembly/simd-arith.ll @@ -161,18 +161,18 @@ ; NO-SIMD128-NOT: i8x16 ; SIMD128-NEXT: .functype shl_vec_v16i8 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: i8x16.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}} +; SIMD128-NEXT: i8x16.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}} ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}} -; SIMD128-NEXT: i8x16.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}} -; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}} -; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}} -; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}} -; SIMD128-NEXT: i32.shl $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}} -; SIMD128-NEXT: i8x16.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}} +; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}} +; SIMD128-NEXT: i32.shl $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]] +; SIMD128-NEXT: i8x16.splat $push[[M3:[0-9]+]]=, $pop[[M2]] ; Skip 14 lanes ; SIMD128: i8x16.extract_lane_s $push[[L4:[0-9]+]]=, $0, 15{{$}} -; SIMD128-NEXT: i8x16.extract_lane_u $push[[L5:[0-9]+]]=, $1, 15{{$}} -; SIMD128-NEXT: i32.shl $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}} -; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 15, $pop[[L6]]{{$}} +; SIMD128-NEXT: i8x16.extract_lane_s $push[[L5:[0-9]+]]=, $1, 15{{$}} +; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 7{{$}} +; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}} +; SIMD128-NEXT: i32.shl $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}} +; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 15, $pop[[M6]]{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} define <16 x i8> @shl_vec_v16i8(<16 x i8> %v, <16 x i8> %x) { %a = shl <16 x i8> %v, %x @@ -197,18 +197,18 @@ ; NO-SIMD128-NOT: i8x16 ; SIMD128-NEXT: .functype shr_s_vec_v16i8 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: i8x16.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}} +; SIMD128-NEXT: i8x16.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}} ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}} -; SIMD128-NEXT: i8x16.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}} -; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}} -; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}} -; SIMD128-NEXT: i8x16.extract_lane_u 
$push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}} -; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}} -; SIMD128-NEXT: i8x16.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}} +; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}} +; SIMD128-NEXT: i32.shr_s $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]] +; SIMD128-NEXT: i8x16.splat $push[[M3:[0-9]+]]=, $pop[[M2]] ; Skip 14 lanes -; SIMD128: i8x16.extract_lane_s $push[[L0:[0-9]+]]=, $0, 15{{$}} -; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $1, 15{{$}} -; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}} -; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop{{[0-9]+}}, 15, $pop[[L2]]{{$}} +; SIMD128: i8x16.extract_lane_s $push[[L4:[0-9]+]]=, $0, 15{{$}} +; SIMD128-NEXT: i8x16.extract_lane_s $push[[L5:[0-9]+]]=, $1, 15{{$}} +; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 7{{$}} +; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}} +; SIMD128-NEXT: i32.shr_s $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}} +; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 15, $pop[[M6]]{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} define <16 x i8> @shr_s_vec_v16i8(<16 x i8> %v, <16 x i8> %x) { %a = ashr <16 x i8> %v, %x @@ -233,18 +233,18 @@ ; NO-SIMD128-NOT: i8x16 ; SIMD128-NEXT: .functype shr_u_vec_v16i8 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: i8x16.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}} +; SIMD128-NEXT: i8x16.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}} ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}} -; SIMD128-NEXT: i8x16.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}} -; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}} -; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}} -; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}} -; SIMD128-NEXT: i32.shr_u $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}} -; SIMD128-NEXT: i8x16.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}} +; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}} +; SIMD128-NEXT: i32.shr_u $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]] +; SIMD128-NEXT: i8x16.splat $push[[M3:[0-9]+]]=, $pop[[M2]] ; Skip 14 lanes ; SIMD128: i8x16.extract_lane_u $push[[L4:[0-9]+]]=, $0, 15{{$}} -; SIMD128-NEXT: i8x16.extract_lane_u $push[[L5:[0-9]+]]=, $1, 15{{$}} -; SIMD128-NEXT: i32.shr_u $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}} -; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 15, $pop[[L6]]{{$}} +; SIMD128-NEXT: i8x16.extract_lane_s $push[[L5:[0-9]+]]=, $1, 15{{$}} +; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 7{{$}} +; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}} +; SIMD128-NEXT: i32.shr_u $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}} +; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 15, $pop[[M6]]{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} define <16 x i8> @shr_u_vec_v16i8(<16 x i8> %v, <16 x i8> %x) { %a = lshr <16 x i8> %v, %x @@ -471,18 +471,18 @@ ; NO-SIMD128-NOT: i16x8 ; SIMD128-NEXT: .functype shl_vec_v8i16 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: i16x8.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}} +; SIMD128-NEXT: i16x8.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}} ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}} -; SIMD128-NEXT: i16x8.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}} -; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}} -; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}} -; 
SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}} -; SIMD128-NEXT: i32.shl $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}} -; SIMD128-NEXT: i16x8.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}} +; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}} +; SIMD128-NEXT: i32.shl $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}} +; SIMD128-NEXT: i16x8.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}} ; Skip 6 lanes ; SIMD128: i16x8.extract_lane_s $push[[L4:[0-9]+]]=, $0, 7{{$}} -; SIMD128-NEXT: i16x8.extract_lane_u $push[[L5:[0-9]+]]=, $1, 7{{$}} -; SIMD128-NEXT: i32.shl $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}} -; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 7, $pop[[L6]]{{$}} +; SIMD128-NEXT: i16x8.extract_lane_s $push[[L5:[0-9]+]]=, $1, 7{{$}} +; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 15{{$}} +; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}} +; SIMD128-NEXT: i32.shl $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}} +; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 7, $pop[[M6]]{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} define <8 x i16> @shl_vec_v8i16(<8 x i16> %v, <8 x i16> %x) { %a = shl <8 x i16> %v, %x @@ -506,18 +506,18 @@ ; NO-SIMD128-NOT: i16x8 ; SIMD128-NEXT: .functype shr_s_vec_v8i16 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: i16x8.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}} +; SIMD128-NEXT: i16x8.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}} ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}} -; SIMD128-NEXT: i16x8.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}} -; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}} -; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}} -; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}} -; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}} -; SIMD128-NEXT: i16x8.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}} +; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}} +; SIMD128-NEXT: i32.shr_s $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}} +; SIMD128-NEXT: i16x8.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}} ; Skip 6 lanes -; SIMD128: i16x8.extract_lane_s $push[[L0:[0-9]+]]=, $0, 7{{$}} -; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $1, 7{{$}} -; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}} -; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop{{[0-9]+}}, 7, $pop[[L2]]{{$}} +; SIMD128: i16x8.extract_lane_s $push[[L4:[0-9]+]]=, $0, 7{{$}} +; SIMD128-NEXT: i16x8.extract_lane_s $push[[L5:[0-9]+]]=, $1, 7{{$}} +; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 15{{$}} +; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}} +; SIMD128-NEXT: i32.shr_s $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}} +; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 7, $pop[[M6]]{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} define <8 x i16> @shr_s_vec_v8i16(<8 x i16> %v, <8 x i16> %x) { %a = ashr <8 x i16> %v, %x @@ -541,18 +541,18 @@ ; NO-SIMD128-NOT: i16x8 ; SIMD128-NEXT: .functype shr_u_vec_v8i16 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: i16x8.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}} +; SIMD128-NEXT: i16x8.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}} ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}} -; SIMD128-NEXT: i16x8.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}} -; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}} -; SIMD128-NEXT: local.tee 
$push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}} -; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}} -; SIMD128-NEXT: i32.shr_u $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}} -; SIMD128-NEXT: i16x8.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}} +; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}} +; SIMD128-NEXT: i32.shr_u $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}} +; SIMD128-NEXT: i16x8.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}} ; Skip 6 lanes ; SIMD128: i16x8.extract_lane_u $push[[L4:[0-9]+]]=, $0, 7{{$}} -; SIMD128-NEXT: i16x8.extract_lane_u $push[[L5:[0-9]+]]=, $1, 7{{$}} -; SIMD128-NEXT: i32.shr_u $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}} -; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 7, $pop[[L6]]{{$}} +; SIMD128-NEXT: i16x8.extract_lane_s $push[[L5:[0-9]+]]=, $1, 7{{$}} +; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 15{{$}} +; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}} +; SIMD128-NEXT: i32.shr_u $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}} +; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 7, $pop[[M6]]{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} define <8 x i16> @shr_u_vec_v8i16(<8 x i16> %v, <8 x i16> %x) { %a = lshr <8 x i16> %v, %x diff --git a/llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll b/llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll --- a/llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll +++ b/llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll @@ -21,16 +21,6 @@ ; CHECK: .functype foo (i32) -> () ; CHECK-NEXT: i32.load8_u 0 ; CHECK-NEXT: i32x4.splat -; CHECK-NEXT: i32.load8_u 1 -; CHECK-NEXT: i32x4.replace_lane 1 -; CHECK-NEXT: i32.const 2 -; CHECK-NEXT: i32.add -; CHECK-NEXT: i32.load8_u 0 -; CHECK-NEXT: i32x4.replace_lane 2 -; CHECK-NEXT: i32.const 3 -; CHECK-NEXT: i32.add -; CHECK-NEXT: i32.load8_u 0 -; CHECK-NEXT: i32x4.replace_lane 3 ; CHECK-NEXT: local.tee ; CHECK-NEXT: i8x16.extract_lane_s 0 ; CHECK-NEXT: f64.convert_i32_s @@ -40,6 +30,9 @@ ; CHECK-NEXT: f64.add ; CHECK-NEXT: f32.demote_f64 ; CHECK-NEXT: f32x4.splat +; CHECK-NEXT: i32.load8_u 1 +; CHECK-NEXT: i32x4.replace_lane 1 +; CHECK-NEXT: local.tee ; CHECK-NEXT: i8x16.extract_lane_s 4 ; CHECK-NEXT: f64.convert_i32_s ; CHECK-NEXT: f64.const 0x0p0 @@ -48,6 +41,11 @@ ; CHECK-NEXT: f64.add ; CHECK-NEXT: f32.demote_f64 ; CHECK-NEXT: f32x4.replace_lane 1 +; CHECK-NEXT: i32.const 2 +; CHECK-NEXT: i32.add +; CHECK-NEXT: i32.load8_u 0 +; CHECK-NEXT: i32x4.replace_lane 2 +; CHECK-NEXT: local.tee ; CHECK-NEXT: i8x16.extract_lane_s 8 ; CHECK-NEXT: f64.convert_i32_s ; CHECK-NEXT: f64.const 0x0p0 @@ -56,6 +54,10 @@ ; CHECK-NEXT: f64.add ; CHECK-NEXT: f32.demote_f64 ; CHECK-NEXT: f32x4.replace_lane 2 +; CHECK-NEXT: i32.const 3 +; CHECK-NEXT: i32.add +; CHECK-NEXT: i32.load8_u 0 +; CHECK-NEXT: i32x4.replace_lane 3 ; CHECK-NEXT: i8x16.extract_lane_s 12 ; CHECK-NEXT: f64.convert_i32_s ; CHECK-NEXT: f64.const 0x0p0 diff --git a/llvm/test/CodeGen/X86/promote-vec3.ll b/llvm/test/CodeGen/X86/promote-vec3.ll --- a/llvm/test/CodeGen/X86/promote-vec3.ll +++ b/llvm/test/CodeGen/X86/promote-vec3.ll @@ -8,13 +8,9 @@ define <3 x i16> @zext_i8(<3 x i8>) { ; SSE3-LABEL: zext_i8: ; SSE3: # %bb.0: +; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %eax ; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %ecx ; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %edx -; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %eax -; SSE3-NEXT: movd %eax, %xmm0 -; SSE3-NEXT: pinsrw $1, %edx, %xmm0 -; SSE3-NEXT: pinsrw $2, %ecx, %xmm0 -; SSE3-NEXT: 
movd %xmm0, %eax ; SSE3-NEXT: # kill: def $ax killed $ax killed $eax ; SSE3-NEXT: # kill: def $dx killed $dx killed $edx ; SSE3-NEXT: # kill: def $cx killed $cx killed $ecx diff --git a/llvm/test/CodeGen/X86/vec_smulo.ll b/llvm/test/CodeGen/X86/vec_smulo.ll --- a/llvm/test/CodeGen/X86/vec_smulo.ll +++ b/llvm/test/CodeGen/X86/vec_smulo.ll @@ -1798,7 +1798,7 @@ ; SSE2-NEXT: por %xmm3, %xmm1 ; SSE2-NEXT: movd %xmm0, %eax ; SSE2-NEXT: movw %ax, (%rdi) -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] ; SSE2-NEXT: movd %xmm0, %ecx ; SSE2-NEXT: movw %cx, 6(%rdi) ; SSE2-NEXT: movd %xmm2, %edx @@ -1853,7 +1853,7 @@ ; SSSE3-NEXT: por %xmm3, %xmm1 ; SSSE3-NEXT: movd %xmm0, %eax ; SSSE3-NEXT: movw %ax, (%rdi) -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] ; SSSE3-NEXT: movd %xmm0, %ecx ; SSSE3-NEXT: movw %cx, 6(%rdi) ; SSSE3-NEXT: movd %xmm2, %edx diff --git a/llvm/test/CodeGen/X86/vec_umulo.ll b/llvm/test/CodeGen/X86/vec_umulo.ll --- a/llvm/test/CodeGen/X86/vec_umulo.ll +++ b/llvm/test/CodeGen/X86/vec_umulo.ll @@ -1587,7 +1587,7 @@ ; SSE2-NEXT: por %xmm3, %xmm0 ; SSE2-NEXT: movd %xmm2, %eax ; SSE2-NEXT: movw %ax, (%rdi) -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] ; SSE2-NEXT: movd %xmm2, %ecx ; SSE2-NEXT: movw %cx, 6(%rdi) ; SSE2-NEXT: movd %xmm1, %edx @@ -1631,7 +1631,7 @@ ; SSSE3-NEXT: por %xmm3, %xmm0 ; SSSE3-NEXT: movd %xmm2, %eax ; SSSE3-NEXT: movw %ax, (%rdi) -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] ; SSSE3-NEXT: movd %xmm2, %ecx ; SSSE3-NEXT: movw %cx, 6(%rdi) ; SSSE3-NEXT: movd %xmm1, %edx diff --git a/llvm/test/CodeGen/X86/xor.ll b/llvm/test/CodeGen/X86/xor.ll --- a/llvm/test/CodeGen/X86/xor.ll +++ b/llvm/test/CodeGen/X86/xor.ll @@ -407,13 +407,10 @@ ; X32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; X32-NEXT: pandn {{\.LCPI.*}}, %xmm0 -; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3] -; X32-NEXT: movd %xmm1, %ecx ; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; X32-NEXT: movd %xmm0, %edx -; X32-NEXT: xorl $1, %edx +; X32-NEXT: movd %xmm0, %ecx ; X32-NEXT: xorl %eax, %eax -; X32-NEXT: orl %ecx, %edx +; X32-NEXT: cmpl $1, %ecx ; X32-NEXT: setne %al ; X32-NEXT: retl ;
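
Note: the functional change above teaches SimplifyDemandedBits to try SimplifyMultipleUseDemandedBits when visiting EXTRACT_VECTOR_ELT. An extract with a constant index demands only a single source element (and possibly only some of its bits), so a multi-use source can often be looked through for the extract alone, without rewriting the source's other uses; the extract is then rebuilt on the simplified value, and the original vector op goes dead once every use has been peeled away the same way. Much of the test churn above is this firing: masking moves from vector ops to cheaper scalar ones (the ARM and WebAssembly shift tests), and register numbering shifts where the original vector op now has fewer uses.

A minimal IR sketch of the shape that benefits, loosely modeled on the KnownUpperZero test updated above (the function and value names are illustrative, not taken from the patch):

  define float @sketch(<4 x i16> %v) {
    ; %m has two uses, but each extract demands only one lane, so each
    ; extract can be rebuilt directly on %v with the mask applied as a
    ; scalar AND; once both extracts are rewritten, the vector AND is dead.
    %m = and <4 x i16> %v, <i16 3, i16 3, i16 3, i16 3>
    %e0 = extractelement <4 x i16> %m, i32 0
    %e3 = extractelement <4 x i16> %m, i32 3
    %f0 = sitofp i16 %e0 to float   ; masked lanes are non-negative
    %f3 = sitofp i16 %e3 to float
    %r = fadd float %f0, %f3
    ret float %r
  }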