Index: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
@@ -32071,6 +32071,14 @@
     if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR)
       return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
 
+    // Share broadcast with the longest vector and extract low subvector (free).
+    for (SDNode *User : Src->uses())
+      if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
+          User->getValueSizeInBits(0) > VT.getSizeInBits()) {
+        return extractSubVector(SDValue(User, 0), 0, DAG, DL,
+                                VT.getSizeInBits());
+      }
+
     return SDValue();
   }
   case X86ISD::PSHUFD:
Index: llvm/trunk/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
+++ llvm/trunk/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
@@ -357,17 +357,15 @@
 define <64 x i8> @f64i8_i32(<64 x i8> %a) {
 ; AVX-LABEL: f64i8_i32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT:    vmovd {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; AVX-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
-; AVX-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm2
+; AVX-NEXT:    vbroadcastss {{.*#+}} ymm2 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
+; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX-NEXT:    vpaddb %xmm2, %xmm3, %xmm3
+; AVX-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX-NEXT:    vpaddb %xmm2, %xmm3, %xmm3
+; AVX-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; AVX-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; AVX-NEXT:    vandps %ymm2, %ymm1, %ymm1
 ; AVX-NEXT:    retl
@@ -390,17 +388,15 @@
 ;
 ; AVX-64-LABEL: f64i8_i32:
 ; AVX-64:       # %bb.0:
-; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-64-NEXT:    vmovd {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; AVX-64-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
-; AVX-64-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; AVX-64-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
-; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX-64-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; AVX-64-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
-; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-64-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm2
+; AVX-64-NEXT:    vbroadcastss {{.*#+}} ymm2 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
+; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX-64-NEXT:    vpaddb %xmm2, %xmm3, %xmm3
+; AVX-64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
+; AVX-64-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX-64-NEXT:    vpaddb %xmm2, %xmm3, %xmm3
+; AVX-64-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; AVX-64-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; AVX-64-NEXT:    vandps %ymm2, %ymm1, %ymm1
 ; AVX-64-NEXT:    retq
@@ -426,21 +422,18 @@
 }
 
 
-; FIXME the load should be folded with the MOVDDUP with AVX1. PR39454
 define <64 x i8> @f64xi8_i64(<64 x i8> %a) {
 ; AVX-LABEL: f64xi8_i64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
-; AVX-NEXT:    vmovddup {{.*#+}} xmm3 = xmm3[0,0]
-; AVX-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm2
+; AVX-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX-NEXT:    vpaddb %xmm2, %xmm3, %xmm3
+; AVX-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX-NEXT:    vpaddb %xmm2, %xmm3, %xmm3
+; AVX-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; AVX-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; AVX-NEXT:    vandps %ymm2, %ymm1, %ymm1
 ; AVX-NEXT:    retl
@@ -463,17 +456,15 @@
 ;
 ; AVX-64-LABEL: f64xi8_i64:
 ; AVX-64:       # %bb.0:
-; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-64-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
-; AVX-64-NEXT:    vmovddup {{.*#+}} xmm3 = xmm3[0,0]
-; AVX-64-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; AVX-64-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
-; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX-64-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
-; AVX-64-NEXT:    vpaddb %xmm3, %xmm0, %xmm0
-; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-64-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm2
+; AVX-64-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX-64-NEXT:    vpaddb %xmm2, %xmm3, %xmm3
+; AVX-64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
+; AVX-64-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX-64-NEXT:    vpaddb %xmm2, %xmm3, %xmm3
+; AVX-64-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; AVX-64-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; AVX-64-NEXT:    vandps %ymm2, %ymm1, %ymm1
 ; AVX-64-NEXT:    retq
@@ -498,6 +489,7 @@
   ret <64 x i8> %res2
 }
 
+
 define <64 x i8> @f64xi8_i128(<64 x i8> %a) {
 ; AVX-LABEL: f64xi8_i128:
 ; AVX:       # %bb.0:
@@ -843,17 +835,15 @@
 define <32 x i16> @f32xi16_i32(<32 x i16> %a) {
 ; AVX-LABEL: f32xi16_i32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT:    vmovd {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; AVX-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
-; AVX-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vpaddw %xmm3, %xmm1, %xmm1
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vpaddw %xmm3, %xmm0, %xmm0
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm2
+; AVX-NEXT:    vbroadcastss {{.*#+}} ymm2 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
+; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX-NEXT:    vpaddw %xmm2, %xmm3, %xmm3
+; AVX-NEXT:    vpaddw %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX-NEXT:    vpaddw %xmm2, %xmm3, %xmm3
+; AVX-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; AVX-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; AVX-NEXT:    vandps %ymm2, %ymm1, %ymm1
 ; AVX-NEXT:    retl
@@ -876,17 +866,15 @@
 ;
 ; AVX-64-LABEL: f32xi16_i32:
 ; AVX-64:       # %bb.0:
-; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-64-NEXT:    vmovd {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; AVX-64-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[0,0,0,0]
-; AVX-64-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
-; AVX-64-NEXT:    vpaddw %xmm3, %xmm1, %xmm1
-; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX-64-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
-; AVX-64-NEXT:    vpaddw %xmm3, %xmm0, %xmm0
-; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-64-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm2
+; AVX-64-NEXT:    vbroadcastss {{.*#+}} ymm2 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
+; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX-64-NEXT:    vpaddw %xmm2, %xmm3, %xmm3
+; AVX-64-NEXT:    vpaddw %xmm2, %xmm1, %xmm1
+; AVX-64-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX-64-NEXT:    vpaddw %xmm2, %xmm3, %xmm3
+; AVX-64-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; AVX-64-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; AVX-64-NEXT:    vandps %ymm2, %ymm1, %ymm1
 ; AVX-64-NEXT:    retq
@@ -912,21 +900,18 @@
 }
 
 
-; FIXME the load should be folded with the MOVDDUP with AVX1. PR39454
 define <32 x i16> @f32xi16_i64(<32 x i16> %a) {
 ; AVX-LABEL: f32xi16_i64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
-; AVX-NEXT:    vmovddup {{.*#+}} xmm3 = xmm3[0,0]
-; AVX-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vpaddw %xmm3, %xmm1, %xmm1
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vpaddw %xmm3, %xmm0, %xmm0
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm2
+; AVX-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309]
+; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX-NEXT:    vpaddw %xmm2, %xmm3, %xmm3
+; AVX-NEXT:    vpaddw %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX-NEXT:    vpaddw %xmm2, %xmm3, %xmm3
+; AVX-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; AVX-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; AVX-NEXT:    vandps %ymm2, %ymm1, %ymm1
 ; AVX-NEXT:    retl
@@ -949,17 +934,15 @@
 ;
 ; AVX-64-LABEL: f32xi16_i64:
 ; AVX-64:       # %bb.0:
-; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-64-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
-; AVX-64-NEXT:    vmovddup {{.*#+}} xmm3 = xmm3[0,0]
-; AVX-64-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
-; AVX-64-NEXT:    vpaddw %xmm3, %xmm1, %xmm1
-; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX-64-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
-; AVX-64-NEXT:    vpaddw %xmm3, %xmm0, %xmm0
-; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-64-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm2
+; AVX-64-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309]
+; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX-64-NEXT:    vpaddw %xmm2, %xmm3, %xmm3
+; AVX-64-NEXT:    vpaddw %xmm2, %xmm1, %xmm1
+; AVX-64-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX-64-NEXT:    vpaddw %xmm2, %xmm3, %xmm3
+; AVX-64-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; AVX-64-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; AVX-64-NEXT:    vandps %ymm2, %ymm1, %ymm1
 ; AVX-64-NEXT:    retq
@@ -1133,7 +1116,6 @@
 }
 
 
-
 define <4 x i32> @f4xi32_i64(<4 x i32> %a) {
 ; AVX-LABEL: f4xi32_i64:
 ; AVX:       # %bb.0:
@@ -1254,21 +1236,18 @@
 }
 
 
-; FIXME the load should be folded with the MOVDDUP with AVX1. PR39454
 define <16 x i32> @f16xi32_i64(<16 x i32> %a) {
 ; AVX-LABEL: f16xi32_i64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
-; AVX-NEXT:    vmovddup {{.*#+}} xmm3 = xmm3[0,0]
-; AVX-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vpaddd %xmm3, %xmm1, %xmm1
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
-; AVX-NEXT:    vpaddd %xmm3, %xmm0, %xmm0
-; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm2
+; AVX-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314]
+; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX-NEXT:    vpaddd %xmm2, %xmm3, %xmm3
+; AVX-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX-NEXT:    vpaddd %xmm2, %xmm3, %xmm3
+; AVX-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; AVX-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; AVX-NEXT:    vandps %ymm2, %ymm1, %ymm1
 ; AVX-NEXT:    retl
@@ -1291,17 +1270,15 @@
 ;
 ; AVX-64-LABEL: f16xi32_i64:
 ; AVX-64:       # %bb.0:
-; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-64-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
-; AVX-64-NEXT:    vmovddup {{.*#+}} xmm3 = xmm3[0,0]
-; AVX-64-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
-; AVX-64-NEXT:    vpaddd %xmm3, %xmm1, %xmm1
-; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
-; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX-64-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
-; AVX-64-NEXT:    vpaddd %xmm3, %xmm0, %xmm0
-; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; AVX-64-NEXT:    vinsertf128 $1, %xmm3, %ymm3, %ymm2
+; AVX-64-NEXT:    vbroadcastsd {{.*#+}} ymm2 = [2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314]
+; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX-64-NEXT:    vpaddd %xmm2, %xmm3, %xmm3
+; AVX-64-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
+; AVX-64-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX-64-NEXT:    vpaddd %xmm2, %xmm3, %xmm3
+; AVX-64-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
+; AVX-64-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; AVX-64-NEXT:    vandps %ymm2, %ymm0, %ymm0
 ; AVX-64-NEXT:    vandps %ymm2, %ymm1, %ymm1
 ; AVX-64-NEXT:    retq
@@ -2045,7 +2022,6 @@
 }
 
 
-
 define <8 x i16> @f8xi16_i32_NaN(<8 x i16> %a) {
 ; AVX-LABEL: f8xi16_i32_NaN:
 ; AVX:       # %bb.0:
Index: llvm/trunk/test/CodeGen/X86/oddshuffles.ll
===================================================================
--- llvm/trunk/test/CodeGen/X86/oddshuffles.ll
+++ llvm/trunk/test/CodeGen/X86/oddshuffles.ll
@@ -1743,10 +1743,10 @@
 ;
 ; AVX2-LABEL: wrongorder:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vbroadcastsd %xmm0, %ymm1
-; AVX2-NEXT:    vmovaps %ymm1, 32(%rdi)
-; AVX2-NEXT:    vmovaps %ymm1, (%rdi)
-; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX2-NEXT:    vbroadcastsd %xmm0, %ymm0
+; AVX2-NEXT:    vmovaps %ymm0, 32(%rdi)
+; AVX2-NEXT:    vmovaps %ymm0, (%rdi)
+; AVX2-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;