diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -8547,8 +8547,8 @@
       LLVMContext *Ctx = DAG.getContext();
       MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
       if (Subtarget.hasAVX()) {
-        if (SplatBitSize <= 64 && Subtarget.hasAVX2() &&
-            !(SplatBitSize == 64 && Subtarget.is32Bit())) {
+        if (SplatBitSize == 32 || SplatBitSize == 64 ||
+            (SplatBitSize < 32 && Subtarget.hasAVX2())) {
           // Splatted value can fit in one INTEGER constant in constant pool.
           // Load the constant and broadcast it.
           MVT CVT = MVT::getIntegerVT(SplatBitSize);
@@ -8567,33 +8567,8 @@
               X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT, MPI, Alignment,
               MachineMemOperand::MOLoad);
           return DAG.getBitcast(VT, Brdcst);
-        } else if (SplatBitSize == 32 || SplatBitSize == 64) {
-          // Splatted value can fit in one FLOAT constant in constant pool.
-          // Load the constant and broadcast it.
-          // AVX have support for 32 and 64 bit broadcast for floats only.
-          // No 64bit integer in 32bit subtarget.
-          MVT CVT = MVT::getFloatingPointVT(SplatBitSize);
-          // Lower the splat via APFloat directly, to avoid any conversion.
-          Constant *C =
-              SplatBitSize == 32
-                  ? ConstantFP::get(*Ctx,
-                                    APFloat(APFloat::IEEEsingle(), SplatValue))
-                  : ConstantFP::get(*Ctx,
-                                    APFloat(APFloat::IEEEdouble(), SplatValue));
-          SDValue CP = DAG.getConstantPool(C, PVT);
-          unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
-
-          unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
-          SDVTList Tys =
-              DAG.getVTList(MVT::getVectorVT(CVT, Repeat), MVT::Other);
-          SDValue Ops[] = {DAG.getEntryNode(), CP};
-          MachinePointerInfo MPI =
-              MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
-          SDValue Brdcst = DAG.getMemIntrinsicNode(
-              X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT, MPI, Alignment,
-              MachineMemOperand::MOLoad);
-          return DAG.getBitcast(VT, Brdcst);
-        } else if (SplatBitSize > 64) {
+        }
+        if (SplatBitSize > 64) {
           // Load the vector of constants and broadcast it.
           MVT CVT = VT.getScalarType();
           Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize,
diff --git a/llvm/test/CodeGen/X86/avg.ll b/llvm/test/CodeGen/X86/avg.ll
--- a/llvm/test/CodeGen/X86/avg.ll
+++ b/llvm/test/CodeGen/X86/avg.ll
@@ -1188,7 +1188,7 @@
 ;
 ; AVX1-LABEL: avg_v32i8_const:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = [7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = [506097522914230528,506097522914230528]
 ; AVX1-NEXT: # xmm0 = mem[0,0]
 ; AVX1-NEXT: vpavgb (%rdi), %xmm0, %xmm1
 ; AVX1-NEXT: vpavgb 16(%rdi), %xmm0, %xmm0
@@ -1239,7 +1239,7 @@
 ;
 ; AVX1-LABEL: avg_v64i8_const:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = [7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = [506097522914230528,506097522914230528]
 ; AVX1-NEXT: # xmm0 = mem[0,0]
 ; AVX1-NEXT: vpavgb (%rdi), %xmm0, %xmm1
 ; AVX1-NEXT: vpavgb 16(%rdi), %xmm0, %xmm2
diff --git a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
--- a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
+++ b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
@@ -155,7 +155,7 @@
 ; AVX1: # %bb.0:
 ; AVX1-NEXT: vmovd %edi, %xmm0
 ; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = [-1.7939930131212661E-307,-1.7939930131212661E-307]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = [9241421688590303745,9241421688590303745]
 ; AVX1-NEXT: # xmm1 = mem[0,0]
 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
@@ -650,7 +650,7 @@
 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[2,2,3,3,4,5,6,7]
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
-; AVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [-1.7939930131212661E-307,-1.7939930131212661E-307,-1.7939930131212661E-307,-1.7939930131212661E-307]
+; AVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
 ; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
diff --git a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
--- a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
+++ b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
@@ -197,7 +197,7 @@
 ; AVX1: # %bb.0:
 ; AVX1-NEXT: vmovd %edi, %xmm0
 ; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = [-1.7939930131212661E-307,-1.7939930131212661E-307]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = [9241421688590303745,9241421688590303745]
 ; AVX1-NEXT: # xmm1 = mem[0,0]
 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
@@ -839,7 +839,7 @@
 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm1[2,2,3,3,4,5,6,7]
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
-; AVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [-1.7939930131212661E-307,-1.7939930131212661E-307,-1.7939930131212661E-307,-1.7939930131212661E-307]
+; AVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [9241421688590303745,9241421688590303745,9241421688590303745,9241421688590303745]
 ; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
 ; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
diff --git a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool.ll b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
--- a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
+++ b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
@@ -160,7 +160,7 @@
 ; AVX1: # %bb.0:
 ; AVX1-NEXT: vmovd %edi, %xmm0
 ; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = [-1.7939930131212661E-307,-1.7939930131212661E-307]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = [9241421688590303745,9241421688590303745]
 ; AVX1-NEXT: # xmm1 = mem[0,0]
 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll b/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
--- a/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
+++ b/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
@@ -53,7 +53,7 @@
 define <16 x i8> @f16xi8_i32(<16 x i8> %a) {
 ; AVX-LABEL: f16xi8_i32:
 ; AVX: # %bb.0:
-; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
+; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [50462976,50462976,50462976,50462976]
 ; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retl
@@ -67,7 +67,7 @@
 ;
 ; AVX-64-LABEL: f16xi8_i32:
 ; AVX-64: # %bb.0:
-; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm1 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
+; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm1 = [50462976,50462976,50462976,50462976]
 ; AVX-64-NEXT: vpaddb %xmm1, %xmm0, %xmm0
 ; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX-64-NEXT: retq
@@ -87,7 +87,7 @@
 define <16 x i8> @f16xi8_i64(<16 x i8> %a) {
 ; AVX-LABEL: f16xi8_i64:
 ; AVX: # %bb.0:
-; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [506097522914230528,506097522914230528]
 ; AVX-NEXT: # xmm1 = mem[0,0]
 ; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -95,14 +95,14 @@
 ;
 ; ALL32-LABEL: f16xi8_i64:
 ; ALL32: # %bb.0:
-; ALL32-NEXT: vpbroadcastq {{.*#+}} xmm1 = [7.9499288951273625E-275,7.9499288951273625E-275]
+; ALL32-NEXT: vpbroadcastq {{.*#+}} xmm1 = [506097522914230528,506097522914230528]
 ; ALL32-NEXT: vpaddb %xmm1, %xmm0, %xmm0
 ; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; ALL32-NEXT: retl
 ;
 ; AVX-64-LABEL: f16xi8_i64:
 ; AVX-64: # %bb.0:
-; AVX-64-NEXT: vmovddup {{.*#+}} xmm1 = [7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX-64-NEXT: vmovddup {{.*#+}} xmm1 = [506097522914230528,506097522914230528]
 ; AVX-64-NEXT: # xmm1 = mem[0,0]
 ; AVX-64-NEXT: vpaddb %xmm1, %xmm0, %xmm0
 ; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -164,7 +164,7 @@
 ; AVX-LABEL: f32xi8_i32:
 ; AVX: # %bb.0:
 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT: vbroadcastss {{.*#+}} xmm2 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
+; AVX-NEXT: vbroadcastss {{.*#+}} xmm2 = [50462976,50462976,50462976,50462976]
 ; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
 ; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -181,7 +181,7 @@
 ; AVX-64-LABEL: f32xi8_i32:
 ; AVX-64: # %bb.0:
 ; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm2 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
+; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm2 = [50462976,50462976,50462976,50462976]
 ; AVX-64-NEXT: vpaddb %xmm2, %xmm1, %xmm1
 ; AVX-64-NEXT: vpaddb %xmm2, %xmm0, %xmm0
 ; AVX-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -204,7 +204,7 @@
 ; AVX-LABEL: f32xi8_i64:
 ; AVX: # %bb.0:
 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [506097522914230528,506097522914230528]
 ; AVX-NEXT: # xmm2 = mem[0,0]
 ; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
 ; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
@@ -214,7 +214,7 @@
 ;
 ; ALL32-LABEL: f32xi8_i64:
 ; ALL32: # %bb.0:
-; ALL32-NEXT: vpbroadcastq {{.*#+}} ymm1 = [7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275]
+; ALL32-NEXT: vpbroadcastq {{.*#+}} ymm1 = [506097522914230528,506097522914230528,506097522914230528,506097522914230528]
 ; ALL32-NEXT: vpaddb %ymm1, %ymm0, %ymm0
 ; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
 ; ALL32-NEXT: retl
@@ -222,7 +222,7 @@
 ; AVX-64-LABEL: f32xi8_i64:
 ; AVX-64: # %bb.0:
 ; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-64-NEXT: vmovddup {{.*#+}} xmm2 = [7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX-64-NEXT: vmovddup {{.*#+}} xmm2 = [506097522914230528,506097522914230528]
 ; AVX-64-NEXT: # xmm2 = mem[0,0]
 ; AVX-64-NEXT: vpaddb %xmm2, %xmm1, %xmm1
 ; AVX-64-NEXT: vpaddb %xmm2, %xmm0, %xmm0
@@ -357,7 +357,7 @@
 define <64 x i8> @f64i8_i32(<64 x i8> %a) {
 ; AVX-LABEL: f64i8_i32:
 ; AVX: # %bb.0:
-; AVX-NEXT: vbroadcastss {{.*#+}} ymm2 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
+; AVX-NEXT: vbroadcastss {{.*#+}} ymm2 = [50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976]
 ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm3
 ; AVX-NEXT: vpaddb %xmm2, %xmm3, %xmm3
 ; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
@@ -388,7 +388,7 @@
 ;
 ; AVX-64-LABEL: f64i8_i32:
 ; AVX-64: # %bb.0:
-; AVX-64-NEXT: vbroadcastss {{.*#+}} ymm2 = [3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37,3.82047143E-37]
+; AVX-64-NEXT: vbroadcastss {{.*#+}} ymm2 = [50462976,50462976,50462976,50462976,50462976,50462976,50462976,50462976]
 ; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm3
 ; AVX-64-NEXT: vpaddb %xmm2, %xmm3, %xmm3
 ; AVX-64-NEXT: vpaddb %xmm2, %xmm1, %xmm1
@@ -425,7 +425,7 @@
 define <64 x i8> @f64xi8_i64(<64 x i8> %a) {
 ; AVX-LABEL: f64xi8_i64:
 ; AVX: # %bb.0:
-; AVX-NEXT: vbroadcastsd {{.*#+}} ymm2 = [7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX-NEXT: vbroadcastsd {{.*#+}} ymm2 = [506097522914230528,506097522914230528,506097522914230528,506097522914230528]
 ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm3
 ; AVX-NEXT: vpaddb %xmm2, %xmm3, %xmm3
 ; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
@@ -440,7 +440,7 @@
 ;
 ; AVX2-LABEL: f64xi8_i64:
 ; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [506097522914230528,506097522914230528,506097522914230528,506097522914230528]
 ; AVX2-NEXT: vpaddb %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT: vpaddb %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -449,14 +449,14 @@
 ;
 ; AVX512BW-LABEL: f64xi8_i64:
 ; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} zmm1 = [7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} zmm1 = [506097522914230528,506097522914230528,506097522914230528,506097522914230528,506097522914230528,506097522914230528,506097522914230528,506097522914230528]
 ; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT: retl
 ;
 ; AVX-64-LABEL: f64xi8_i64:
 ; AVX-64: # %bb.0:
-; AVX-64-NEXT: vbroadcastsd {{.*#+}} ymm2 = [7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX-64-NEXT: vbroadcastsd {{.*#+}} ymm2 = [506097522914230528,506097522914230528,506097522914230528,506097522914230528]
 ; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm3
 ; AVX-64-NEXT: vpaddb %xmm2, %xmm3, %xmm3
 ; AVX-64-NEXT: vpaddb %xmm2, %xmm1, %xmm1
@@ -641,7 +641,7 @@
 define <8 x i16> @f8xi16_i32(<8 x i16> %a) {
 ; AVX-LABEL: f8xi16_i32:
 ; AVX: # %bb.0:
-; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
+; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [65536,65536,65536,65536]
 ; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retl
@@ -655,7 +655,7 @@
 ;
 ; AVX-64-LABEL: f8xi16_i32:
 ; AVX-64: # %bb.0:
-; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm1 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
+; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm1 = [65536,65536,65536,65536]
 ; AVX-64-NEXT: vpaddw %xmm1, %xmm0, %xmm0
 ; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX-64-NEXT: retq
@@ -675,7 +675,7 @@
 define <8 x i16> @f8xi16_i64(<8 x i16> %a) {
 ; AVX-LABEL: f8xi16_i64:
 ; AVX: # %bb.0:
-; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [4.1720559249406128E-309,4.1720559249406128E-309]
+; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [844433520132096,844433520132096]
 ; AVX-NEXT: # xmm1 = mem[0,0]
 ; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -683,14 +683,14 @@
 ;
 ; ALL32-LABEL: f8xi16_i64:
 ; ALL32: # %bb.0:
-; ALL32-NEXT: vpbroadcastq {{.*#+}} xmm1 = [4.1720559249406128E-309,4.1720559249406128E-309]
+; ALL32-NEXT: vpbroadcastq {{.*#+}} xmm1 = [844433520132096,844433520132096]
 ; ALL32-NEXT: vpaddw %xmm1, %xmm0, %xmm0
 ; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; ALL32-NEXT: retl
 ;
 ; AVX-64-LABEL: f8xi16_i64:
 ; AVX-64: # %bb.0:
-; AVX-64-NEXT: vmovddup {{.*#+}} xmm1 = [4.1720559249406128E-309,4.1720559249406128E-309]
+; AVX-64-NEXT: vmovddup {{.*#+}} xmm1 = [844433520132096,844433520132096]
 ; AVX-64-NEXT: # xmm1 = mem[0,0]
 ; AVX-64-NEXT: vpaddw %xmm1, %xmm0, %xmm0
 ; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -712,7 +712,7 @@
 ; AVX-LABEL: f16xi16_i32:
 ; AVX: # %bb.0:
 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT: vbroadcastss {{.*#+}} xmm2 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
+; AVX-NEXT: vbroadcastss {{.*#+}} xmm2 = [65536,65536,65536,65536]
 ; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
 ; AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0
 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -729,7 +729,7 @@
 ; AVX-64-LABEL: f16xi16_i32:
 ; AVX-64: # %bb.0:
 ; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm2 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
+; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm2 = [65536,65536,65536,65536]
 ; AVX-64-NEXT: vpaddw %xmm2, %xmm1, %xmm1
 ; AVX-64-NEXT: vpaddw %xmm2, %xmm0, %xmm0
 ; AVX-64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -752,7 +752,7 @@
 ; AVX-LABEL: f16xi16_i64:
 ; AVX: # %bb.0:
 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [4.1720559249406128E-309,4.1720559249406128E-309]
+; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [844433520132096,844433520132096]
 ; AVX-NEXT: # xmm2 = mem[0,0]
 ; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
 ; AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0
@@ -762,7 +762,7 @@
 ;
 ; ALL32-LABEL: f16xi16_i64:
 ; ALL32: # %bb.0:
-; ALL32-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309]
+; ALL32-NEXT: vpbroadcastq {{.*#+}} ymm1 = [844433520132096,844433520132096,844433520132096,844433520132096]
 ; ALL32-NEXT: vpaddw %ymm1, %ymm0, %ymm0
 ; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
 ; ALL32-NEXT: retl
@@ -770,7 +770,7 @@
 ; AVX-64-LABEL: f16xi16_i64:
 ; AVX-64: # %bb.0:
 ; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-64-NEXT: vmovddup {{.*#+}} xmm2 = [4.1720559249406128E-309,4.1720559249406128E-309]
+; AVX-64-NEXT: vmovddup {{.*#+}} xmm2 = [844433520132096,844433520132096]
 ; AVX-64-NEXT: # xmm2 = mem[0,0]
 ; AVX-64-NEXT: vpaddw %xmm2, %xmm1, %xmm1
 ; AVX-64-NEXT: vpaddw %xmm2, %xmm0, %xmm0
@@ -835,7 +835,7 @@
 define <32 x i16> @f32xi16_i32(<32 x i16> %a) {
 ; AVX-LABEL: f32xi16_i32:
 ; AVX: # %bb.0:
-; AVX-NEXT: vbroadcastss {{.*#+}} ymm2 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
+; AVX-NEXT: vbroadcastss {{.*#+}} ymm2 = [65536,65536,65536,65536,65536,65536,65536,65536]
 ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm3
 ; AVX-NEXT: vpaddw %xmm2, %xmm3, %xmm3
 ; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
@@ -866,7 +866,7 @@
 ;
 ; AVX-64-LABEL: f32xi16_i32:
 ; AVX-64: # %bb.0:
-; AVX-64-NEXT: vbroadcastss {{.*#+}} ymm2 = [9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41,9.18354962E-41]
+; AVX-64-NEXT: vbroadcastss {{.*#+}} ymm2 = [65536,65536,65536,65536,65536,65536,65536,65536]
 ; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm3
 ; AVX-64-NEXT: vpaddw %xmm2, %xmm3, %xmm3
 ; AVX-64-NEXT: vpaddw %xmm2, %xmm1, %xmm1
@@ -903,7 +903,7 @@
 define <32 x i16> @f32xi16_i64(<32 x i16> %a) {
 ; AVX-LABEL: f32xi16_i64:
 ; AVX: # %bb.0:
-; AVX-NEXT: vbroadcastsd {{.*#+}} ymm2 = [4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309]
+; AVX-NEXT: vbroadcastsd {{.*#+}} ymm2 = [844433520132096,844433520132096,844433520132096,844433520132096]
 ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm3
 ; AVX-NEXT: vpaddw %xmm2, %xmm3, %xmm3
 ; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
@@ -918,7 +918,7 @@
 ;
 ; AVX2-LABEL: f32xi16_i64:
 ; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [844433520132096,844433520132096,844433520132096,844433520132096]
 ; AVX2-NEXT: vpaddw %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT: vpaddw %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -927,14 +927,14 @@
 ;
 ; AVX512BW-LABEL: f32xi16_i64:
 ; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpbroadcastq {{.*#+}} zmm1 = [4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309]
+; AVX512BW-NEXT: vpbroadcastq {{.*#+}} zmm1 = [844433520132096,844433520132096,844433520132096,844433520132096,844433520132096,844433520132096,844433520132096,844433520132096]
 ; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT: retl
 ;
 ; AVX-64-LABEL: f32xi16_i64:
 ; AVX-64: # %bb.0:
-; AVX-64-NEXT: vbroadcastsd {{.*#+}} ymm2 = [4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309,4.1720559249406128E-309]
+; AVX-64-NEXT: vbroadcastsd {{.*#+}} ymm2 = [844433520132096,844433520132096,844433520132096,844433520132096]
 ; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm3
 ; AVX-64-NEXT: vpaddw %xmm2, %xmm3, %xmm3
 ; AVX-64-NEXT: vpaddw %xmm2, %xmm1, %xmm1
@@ -1119,7 +1119,7 @@
 define <4 x i32> @f4xi32_i64(<4 x i32> %a) {
 ; AVX-LABEL: f4xi32_i64:
 ; AVX: # %bb.0:
-; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [2.1219957909652723E-314,2.1219957909652723E-314]
+; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [4294967296,4294967296]
 ; AVX-NEXT: # xmm1 = mem[0,0]
 ; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -1127,14 +1127,14 @@
 ;
 ; ALL32-LABEL: f4xi32_i64:
 ; ALL32: # %bb.0:
-; ALL32-NEXT: vpbroadcastq {{.*#+}} xmm1 = [2.1219957909652723E-314,2.1219957909652723E-314]
+; ALL32-NEXT: vpbroadcastq {{.*#+}} xmm1 = [4294967296,4294967296]
 ; ALL32-NEXT: vpaddd %xmm1, %xmm0, %xmm0
 ; ALL32-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; ALL32-NEXT: retl
 ;
 ; AVX-64-LABEL: f4xi32_i64:
 ; AVX-64: # %bb.0:
-; AVX-64-NEXT: vmovddup {{.*#+}} xmm1 = [2.1219957909652723E-314,2.1219957909652723E-314]
+; AVX-64-NEXT: vmovddup {{.*#+}} xmm1 = [4294967296,4294967296]
 ; AVX-64-NEXT: # xmm1 = mem[0,0]
 ; AVX-64-NEXT: vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -1156,7 +1156,7 @@
 ; AVX-LABEL: f8xi32_i64:
 ; AVX: # %bb.0:
 ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [2.1219957909652723E-314,2.1219957909652723E-314]
+; AVX-NEXT: vmovddup {{.*#+}} xmm2 = [4294967296,4294967296]
 ; AVX-NEXT: # xmm2 = mem[0,0]
 ; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm1
 ; AVX-NEXT: vpaddd %xmm2, %xmm0, %xmm0
@@ -1166,7 +1166,7 @@
 ;
 ; ALL32-LABEL: f8xi32_i64:
 ; ALL32: # %bb.0:
-; ALL32-NEXT: vpbroadcastq {{.*#+}} ymm1 = [2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314]
+; ALL32-NEXT: vpbroadcastq {{.*#+}} ymm1 = [4294967296,4294967296,4294967296,4294967296]
 ; ALL32-NEXT: vpaddd %ymm1, %ymm0, %ymm0
 ; ALL32-NEXT: vpand %ymm1, %ymm0, %ymm0
 ; ALL32-NEXT: retl
@@ -1174,7 +1174,7 @@
 ; AVX-64-LABEL: f8xi32_i64:
 ; AVX-64: # %bb.0:
 ; AVX-64-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX-64-NEXT: vmovddup {{.*#+}} xmm2 = [2.1219957909652723E-314,2.1219957909652723E-314]
+; AVX-64-NEXT: vmovddup {{.*#+}} xmm2 = [4294967296,4294967296]
 ; AVX-64-NEXT: # xmm2 = mem[0,0]
 ; AVX-64-NEXT: vpaddd %xmm2, %xmm1, %xmm1
 ; AVX-64-NEXT: vpaddd %xmm2, %xmm0, %xmm0
@@ -1239,7 +1239,7 @@
 define <16 x i32> @f16xi32_i64(<16 x i32> %a) {
 ; AVX-LABEL: f16xi32_i64:
 ; AVX: # %bb.0:
-; AVX-NEXT: vbroadcastsd {{.*#+}} ymm2 = [2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314]
+; AVX-NEXT: vbroadcastsd {{.*#+}} ymm2 = [4294967296,4294967296,4294967296,4294967296]
 ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm3
 ; AVX-NEXT: vpaddd %xmm2, %xmm3, %xmm3
 ; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm1
@@ -1254,7 +1254,7 @@
 ;
 ; AVX2-LABEL: f16xi32_i64:
 ; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314]
+; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [4294967296,4294967296,4294967296,4294967296]
 ; AVX2-NEXT: vpaddd %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
@@ -1263,14 +1263,14 @@
 ;
 ; AVX512-LABEL: f16xi32_i64:
 ; AVX512: # %bb.0:
-; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm1 = [2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314]
+; AVX512-NEXT: vpbroadcastq {{.*#+}} zmm1 = [4294967296,4294967296,4294967296,4294967296,4294967296,4294967296,4294967296,4294967296]
 ; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT: retl
 ;
 ; AVX-64-LABEL: f16xi32_i64:
 ; AVX-64: # %bb.0:
-; AVX-64-NEXT: vbroadcastsd {{.*#+}} ymm2 = [2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314,2.1219957909652723E-314]
+; AVX-64-NEXT: vbroadcastsd {{.*#+}} ymm2 = [4294967296,4294967296,4294967296,4294967296]
 ; AVX-64-NEXT: vextractf128 $1, %ymm1, %xmm3
 ; AVX-64-NEXT: vpaddd %xmm2, %xmm3, %xmm3
 ; AVX-64-NEXT: vpaddd %xmm2, %xmm1, %xmm1
@@ -1578,7 +1578,7 @@
 define <4 x float> @f4xf32_f64(<4 x float> %a) {
 ; AVX-LABEL: f4xf32_f64:
 ; AVX: # %bb.0:
-; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [7.8125018626451492E-3,7.8125018626451492E-3]
+; AVX-NEXT: vmovddup {{.*#+}} xmm1 = [4575657222482165760,4575657222482165760]
 ; AVX-NEXT: # xmm1 = mem[0,0]
 ; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vdivps %xmm0, %xmm1, %xmm0
@@ -1586,7 +1586,7 @@
 ;
 ; ALL32-LABEL: f4xf32_f64:
 ; ALL32: # %bb.0:
-; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = [7.8125018626451492E-3,7.8125018626451492E-3]
+; ALL32-NEXT: vmovddup {{.*#+}} xmm1 = [4575657222482165760,4575657222482165760]
 ; ALL32-NEXT: # xmm1 = mem[0,0]
 ; ALL32-NEXT: vaddps %xmm1, %xmm0, %xmm0
 ; ALL32-NEXT: vdivps %xmm0, %xmm1, %xmm0
@@ -1594,7 +1594,7 @@
 ;
 ; AVX-64-LABEL: f4xf32_f64:
 ; AVX-64: # %bb.0:
-; AVX-64-NEXT: vmovddup {{.*#+}} xmm1 = [7.8125018626451492E-3,7.8125018626451492E-3]
+; AVX-64-NEXT: vmovddup {{.*#+}} xmm1 = [4575657222482165760,4575657222482165760]
 ; AVX-64-NEXT: # xmm1 = mem[0,0]
 ; AVX-64-NEXT: vaddps %xmm1, %xmm0, %xmm0
 ; AVX-64-NEXT: vdivps %xmm0, %xmm1, %xmm0
@@ -1616,21 +1616,21 @@
 define <8 x float> @f8xf32_f64(<8 x float> %a) {
 ; AVX-LABEL: f8xf32_f64:
 ; AVX: # %bb.0:
-; AVX-NEXT: vbroadcastsd {{.*#+}} ymm1 = [7.8125018626451492E-3,7.8125018626451492E-3,7.8125018626451492E-3,7.8125018626451492E-3]
+; AVX-NEXT: vbroadcastsd {{.*#+}} ymm1 = [4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760]
 ; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT: vdivps %ymm0, %ymm1, %ymm0
 ; AVX-NEXT: retl
 ;
 ; ALL32-LABEL: f8xf32_f64:
 ; ALL32: # %bb.0:
-; ALL32-NEXT: vbroadcastsd {{.*#+}} ymm1 = [7.8125018626451492E-3,7.8125018626451492E-3,7.8125018626451492E-3,7.8125018626451492E-3]
+; ALL32-NEXT: vbroadcastsd {{.*#+}} ymm1 = [4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760]
 ; ALL32-NEXT: vaddps %ymm1, %ymm0, %ymm0
 ; ALL32-NEXT: vdivps %ymm0, %ymm1, %ymm0
 ; ALL32-NEXT: retl
 ;
 ; AVX-64-LABEL: f8xf32_f64:
 ; AVX-64: # %bb.0:
-; AVX-64-NEXT: vbroadcastsd {{.*#+}} ymm1 = [7.8125018626451492E-3,7.8125018626451492E-3,7.8125018626451492E-3,7.8125018626451492E-3]
+; AVX-64-NEXT: vbroadcastsd {{.*#+}} ymm1 = [4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760]
 ; AVX-64-NEXT: vaddps %ymm1, %ymm0, %ymm0
 ; AVX-64-NEXT: vdivps %ymm0, %ymm1, %ymm0
 ; AVX-64-NEXT: retq
@@ -1688,7 +1688,7 @@
 define <16 x float> @f16xf32_f64(<16 x float> %a) {
 ; AVX-LABEL: f16xf32_f64:
 ; AVX: # %bb.0:
-; AVX-NEXT: vbroadcastsd {{.*#+}} ymm2 = [7.8125018626451492E-3,7.8125018626451492E-3,7.8125018626451492E-3,7.8125018626451492E-3]
+; AVX-NEXT: vbroadcastsd {{.*#+}} ymm2 = [4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760]
 ; AVX-NEXT: vaddps %ymm2, %ymm1, %ymm1
 ; AVX-NEXT: vaddps %ymm2, %ymm0, %ymm0
 ; AVX-NEXT: vdivps %ymm0, %ymm2, %ymm0
@@ -1697,7 +1697,7 @@
 ;
 ; AVX2-LABEL: f16xf32_f64:
 ; AVX2: # %bb.0:
-; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [7.8125018626451492E-3,7.8125018626451492E-3,7.8125018626451492E-3,7.8125018626451492E-3]
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760]
 ; AVX2-NEXT: vaddps %ymm2, %ymm1, %ymm1
 ; AVX2-NEXT: vaddps %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT: vdivps %ymm0, %ymm2, %ymm0
@@ -1706,14 +1706,14 @@
 ;
 ; AVX512-LABEL: f16xf32_f64:
 ; AVX512: # %bb.0:
-; AVX512-NEXT: vbroadcastsd {{.*#+}} zmm1 = [7.8125018626451492E-3,7.8125018626451492E-3,7.8125018626451492E-3,7.8125018626451492E-3,7.8125018626451492E-3,7.8125018626451492E-3,7.8125018626451492E-3,7.8125018626451492E-3]
+; AVX512-NEXT: vbroadcastsd {{.*#+}} zmm1 = [4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760]
 ; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT: vdivps %zmm0, %zmm1, %zmm0
 ; AVX512-NEXT: retl
 ;
 ; AVX-64-LABEL: f16xf32_f64:
 ; AVX-64: # %bb.0:
-; AVX-64-NEXT: vbroadcastsd {{.*#+}} ymm2 = [7.8125018626451492E-3,7.8125018626451492E-3,7.8125018626451492E-3,7.8125018626451492E-3]
+; AVX-64-NEXT: vbroadcastsd {{.*#+}} ymm2 = [4575657222482165760,4575657222482165760,4575657222482165760,4575657222482165760]
 ; AVX-64-NEXT: vaddps %ymm2, %ymm1, %ymm1
 ; AVX-64-NEXT: vaddps %ymm2, %ymm0, %ymm0
 ; AVX-64-NEXT: vdivps %ymm0, %ymm2, %ymm0
@@ -2029,7 +2029,7 @@
 define <8 x i16> @f8xi16_i32_NaN(<8 x i16> %a) {
 ; AVX-LABEL: f8xi16_i32_NaN:
 ; AVX: # %bb.0:
-; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
+; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [4290379776,4290379776,4290379776,4290379776]
 ; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retl
@@ -2043,7 +2043,7 @@
 ;
 ; AVX-64-LABEL: f8xi16_i32_NaN:
 ; AVX-64: # %bb.0:
-; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm1 = [NaN,NaN,NaN,NaN]
+; AVX-64-NEXT: vbroadcastss {{.*#+}} xmm1 = [4290379776,4290379776,4290379776,4290379776]
 ; AVX-64-NEXT: vpaddw %xmm1, %xmm0, %xmm0
 ; AVX-64-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX-64-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/horizontal-reduce-umax.ll b/llvm/test/CodeGen/X86/horizontal-reduce-umax.ll
--- a/llvm/test/CodeGen/X86/horizontal-reduce-umax.ll
+++ b/llvm/test/CodeGen/X86/horizontal-reduce-umax.ll
@@ -53,7 +53,7 @@
 ; X86-AVX1-LABEL: test_reduce_v2i64:
 ; X86-AVX1: ## %bb.0:
 ; X86-AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X86-AVX1-NEXT: vmovddup {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; X86-AVX1-NEXT: vmovddup {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; X86-AVX1-NEXT: ## xmm2 = mem[0,0]
 ; X86-AVX1-NEXT: vxorps %xmm2, %xmm0, %xmm3
 ; X86-AVX1-NEXT: vxorps %xmm2, %xmm1, %xmm2
@@ -66,7 +66,7 @@
 ; X86-AVX2-LABEL: test_reduce_v2i64:
 ; X86-AVX2: ## %bb.0:
 ; X86-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X86-AVX2-NEXT: vpbroadcastq {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; X86-AVX2-NEXT: vpbroadcastq {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; X86-AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3
 ; X86-AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm2
 ; X86-AVX2-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
@@ -527,7 +527,7 @@
 ; X86-AVX1-LABEL: test_reduce_v4i64:
 ; X86-AVX1: ## %bb.0:
 ; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X86-AVX1-NEXT: vmovddup {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; X86-AVX1-NEXT: vmovddup {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; X86-AVX1-NEXT: ## xmm2 = mem[0,0]
 ; X86-AVX1-NEXT: vxorps %xmm2, %xmm1, %xmm3
 ; X86-AVX1-NEXT: vxorps %xmm2, %xmm0, %xmm4
@@ -546,7 +546,7 @@
 ; X86-AVX2-LABEL: test_reduce_v4i64:
 ; X86-AVX2: ## %bb.0:
 ; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; X86-AVX2-NEXT: vpbroadcastq {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; X86-AVX2-NEXT: vpbroadcastq {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; X86-AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm3
 ; X86-AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm4
 ; X86-AVX2-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
@@ -1222,7 +1222,7 @@
 ;
 ; X86-AVX1-LABEL: test_reduce_v8i64:
 ; X86-AVX1: ## %bb.0:
-; X86-AVX1-NEXT: vmovddup {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; X86-AVX1-NEXT: vmovddup {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; X86-AVX1-NEXT: ## xmm2 = mem[0,0]
 ; X86-AVX1-NEXT: vxorps %xmm2, %xmm1, %xmm3
 ; X86-AVX1-NEXT: vxorps %xmm2, %xmm0, %xmm4
@@ -1250,7 +1250,7 @@
 ;
 ; X86-AVX2-LABEL: test_reduce_v8i64:
 ; X86-AVX2: ## %bb.0:
-; X86-AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
 ; X86-AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm3
 ; X86-AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm4
 ; X86-AVX2-NEXT: vpcmpgtq %ymm3, %ymm4, %ymm3
diff --git a/llvm/test/CodeGen/X86/horizontal-reduce-umin.ll b/llvm/test/CodeGen/X86/horizontal-reduce-umin.ll
--- a/llvm/test/CodeGen/X86/horizontal-reduce-umin.ll
+++ b/llvm/test/CodeGen/X86/horizontal-reduce-umin.ll
@@ -54,7 +54,7 @@
 ; X86-AVX1-LABEL: test_reduce_v2i64:
 ; X86-AVX1: ## %bb.0:
 ; X86-AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X86-AVX1-NEXT: vmovddup {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; X86-AVX1-NEXT: vmovddup {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; X86-AVX1-NEXT: ## xmm2 = mem[0,0]
 ; X86-AVX1-NEXT: vxorps %xmm2, %xmm0, %xmm3
 ; X86-AVX1-NEXT: vxorps %xmm2, %xmm1, %xmm2
@@ -67,7 +67,7 @@
 ; X86-AVX2-LABEL: test_reduce_v2i64:
 ; X86-AVX2: ## %bb.0:
 ; X86-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X86-AVX2-NEXT: vpbroadcastq {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; X86-AVX2-NEXT: vpbroadcastq {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; X86-AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3
 ; X86-AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm2
 ; X86-AVX2-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
@@ -464,7 +464,7 @@
 ;
 ; X86-AVX1-LABEL: test_reduce_v4i64:
 ; X86-AVX1: ## %bb.0:
-; X86-AVX1-NEXT: vmovddup {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0]
+; X86-AVX1-NEXT: vmovddup {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
 ; X86-AVX1-NEXT: ## xmm1 = mem[0,0]
 ; X86-AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm2
 ; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
@@ -484,7 +484,7 @@
 ; X86-AVX2-LABEL: test_reduce_v4i64:
 ; X86-AVX2: ## %bb.0:
 ; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; X86-AVX2-NEXT: vpbroadcastq {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0]
+; X86-AVX2-NEXT: vpbroadcastq {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
 ; X86-AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3
 ; X86-AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm4
 ; X86-AVX2-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
@@ -1125,7 +1125,7 @@
 ; X86-AVX1-LABEL: test_reduce_v8i64:
 ; X86-AVX1: ## %bb.0:
 ; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; X86-AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [-0.0E+0,-0.0E+0]
+; X86-AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; X86-AVX1-NEXT: ## xmm3 = mem[0,0]
 ; X86-AVX1-NEXT: vxorps %xmm3, %xmm2, %xmm4
 ; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
@@ -1152,7 +1152,7 @@
 ;
 ; X86-AVX2-LABEL: test_reduce_v8i64:
 ; X86-AVX2: ## %bb.0:
-; X86-AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; X86-AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
 ; X86-AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
 ; X86-AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm4
 ; X86-AVX2-NEXT: vpcmpgtq %ymm3, %ymm4, %ymm3
diff --git a/llvm/test/CodeGen/X86/i64-to-float.ll b/llvm/test/CodeGen/X86/i64-to-float.ll
--- a/llvm/test/CodeGen/X86/i64-to-float.ll
+++ b/llvm/test/CodeGen/X86/i64-to-float.ll
@@ -179,11 +179,11 @@
 ;
 ; X32-AVX-LABEL: clamp_sitofp_2i64_2f64:
 ; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vmovddup {{.*#+}} xmm1 = [NaN,NaN]
+; X32-AVX-NEXT: vmovddup {{.*#+}} xmm1 = [18446744073709551361,18446744073709551361]
 ; X32-AVX-NEXT: # xmm1 = mem[0,0]
 ; X32-AVX-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
 ; X32-AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; X32-AVX-NEXT: vmovddup {{.*#+}} xmm1 = [1.2598673968951787E-321,1.2598673968951787E-321]
+; X32-AVX-NEXT: vmovddup {{.*#+}} xmm1 = [255,255]
 ; X32-AVX-NEXT: # xmm1 = mem[0,0]
 ; X32-AVX-NEXT: vpcmpgtq %xmm0, %xmm1, %xmm2
 ; X32-AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/pr30284.ll b/llvm/test/CodeGen/X86/pr30284.ll
--- a/llvm/test/CodeGen/X86/pr30284.ll
+++ b/llvm/test/CodeGen/X86/pr30284.ll
@@ -21,7 +21,7 @@
 ; CHECK-NEXT: vpmovd2m %zmm0, %k1
 ; CHECK-NEXT: vmovapd 0, %zmm0
 ; CHECK-NEXT: vmovapd 64, %zmm1
-; CHECK-NEXT: vbroadcastsd {{.*#+}} zmm2 = [3.3951932655444357E-313,3.3951932655444357E-313,3.3951932655444357E-313,3.3951932655444357E-313,3.3951932655444357E-313,3.3951932655444357E-313,3.3951932655444357E-313,3.3951932655444357E-313]
+; CHECK-NEXT: vbroadcastsd {{.*#+}} zmm2 = [68719476736,68719476736,68719476736,68719476736,68719476736,68719476736,68719476736,68719476736]
 ; CHECK-NEXT: kshiftrw $8, %k1, %k2
 ; CHECK-NEXT: vorpd %zmm2, %zmm1, %zmm1 {%k2}
 ; CHECK-NEXT: vorpd %zmm2, %zmm0, %zmm0 {%k1}
diff --git a/llvm/test/CodeGen/X86/vec_uaddo.ll b/llvm/test/CodeGen/X86/vec_uaddo.ll
--- a/llvm/test/CodeGen/X86/vec_uaddo.ll
+++ b/llvm/test/CodeGen/X86/vec_uaddo.ll
@@ -1035,7 +1035,7 @@
 ;
 ; AVX1-LABEL: uaddo_v4i24:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [2.35098856E-38,2.35098856E-38,2.35098856E-38,2.35098856E-38]
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [16777215,16777215,16777215,16777215]
 ; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm1
diff --git a/llvm/test/CodeGen/X86/vec_umulo.ll b/llvm/test/CodeGen/X86/vec_umulo.ll
--- a/llvm/test/CodeGen/X86/vec_umulo.ll
+++ b/llvm/test/CodeGen/X86/vec_umulo.ll
@@ -1691,7 +1691,7 @@
 ;
 ; AVX1-LABEL: umulo_v4i24:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [2.35098856E-38,2.35098856E-38,2.35098856E-38,2.35098856E-38]
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [16777215,16777215,16777215,16777215]
 ; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[1,1,3,3]
diff --git a/llvm/test/CodeGen/X86/vec_usubo.ll b/llvm/test/CodeGen/X86/vec_usubo.ll
--- a/llvm/test/CodeGen/X86/vec_usubo.ll
+++ b/llvm/test/CodeGen/X86/vec_usubo.ll
@@ -1082,7 +1082,7 @@
 ;
 ; AVX1-LABEL: usubo_v4i24:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [2.35098856E-38,2.35098856E-38,2.35098856E-38,2.35098856E-38]
+; AVX1-NEXT: vbroadcastss {{.*#+}} xmm2 = [16777215,16777215,16777215,16777215]
 ; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm1
diff --git a/llvm/test/CodeGen/X86/vector-blend.ll b/llvm/test/CodeGen/X86/vector-blend.ll
--- a/llvm/test/CodeGen/X86/vector-blend.ll
+++ b/llvm/test/CodeGen/X86/vector-blend.ll
@@ -619,7 +619,7 @@
 ;
 ; AVX1-LABEL: constant_pblendvb_avx2:
 ; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [-5.4861292804117373E+303,-5.4861292804117373E+303,-5.4861292804117373E+303,-5.4861292804117373E+303]
+; AVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [18374686483949879295,18374686483949879295,18374686483949879295,18374686483949879295]
 ; AVX1-NEXT: vandnps %ymm0, %ymm2, %ymm0
 ; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
 ; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
diff --git a/llvm/test/CodeGen/X86/vector-fshl-256.ll b/llvm/test/CodeGen/X86/vector-fshl-256.ll
--- a/llvm/test/CodeGen/X86/vector-fshl-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-256.ll
@@ -2041,7 +2041,7 @@
 ; AVX1-NEXT: vpackuswb %xmm3, %xmm4, %xmm3
 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
 ; AVX1-NEXT: vorps %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [NaN,NaN,NaN,NaN]
+; AVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [18446744073709551360,18446744073709551360,18446744073709551360,18446744073709551360]
 ; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
 ; AVX1-NEXT: vandnps %ymm0, %ymm2, %ymm0
 ; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
@@ -2194,7 +2194,7 @@
 ; XOPAVX1-NEXT: vpshlb %xmm3, %xmm0, %xmm3
 ; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
 ; XOPAVX1-NEXT: vorps %ymm1, %ymm2, %ymm1
-; XOPAVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [NaN,NaN,NaN,NaN]
+; XOPAVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [18446744073709551360,18446744073709551360,18446744073709551360,18446744073709551360]
 ; XOPAVX1-NEXT: vpcmov %ymm2, %ymm0, %ymm1, %ymm0
 ; XOPAVX1-NEXT: retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-fshr-256.ll b/llvm/test/CodeGen/X86/vector-fshr-256.ll
--- a/llvm/test/CodeGen/X86/vector-fshr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-256.ll
@@ -2044,7 +2044,7 @@
 ; AVX1-NEXT: vpackuswb %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0
-; AVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [NaN,NaN,NaN,NaN]
+; AVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [18446744073709551360,18446744073709551360,18446744073709551360,18446744073709551360]
 ; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
 ; AVX1-NEXT: vandnps %ymm1, %ymm2, %ymm1
 ; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
@@ -2195,7 +2195,7 @@
 ; XOPAVX1-NEXT: vpshlb %xmm4, %xmm0, %xmm0
 ; XOPAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; XOPAVX1-NEXT: vorps %ymm2, %ymm0, %ymm0
-; XOPAVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [NaN,NaN,NaN,NaN]
+; XOPAVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [18446744073709551360,18446744073709551360,18446744073709551360,18446744073709551360]
 ; XOPAVX1-NEXT: vpcmov %ymm2, %ymm1, %ymm0, %ymm0
 ; XOPAVX1-NEXT: retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-sext.ll b/llvm/test/CodeGen/X86/vector-sext.ll
--- a/llvm/test/CodeGen/X86/vector-sext.ll
+++ b/llvm/test/CodeGen/X86/vector-sext.ll
@@ -2393,7 +2393,7 @@
 ; AVX1: # %bb.0: # %entry
 ; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = [-1.7939930131212661E-307,-1.7939930131212661E-307]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = [9241421688590303745,9241421688590303745]
 ; AVX1-NEXT: # xmm1 = mem[0,0]
 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
--- a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
@@ -89,7 +89,7 @@
 ; X32-AVX1-LABEL: var_shift_v4i64:
 ; X32-AVX1: # %bb.0:
 ; X32-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; X32-AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [-0.0E+0,-0.0E+0]
+; X32-AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [9223372036854775808,9223372036854775808]
 ; X32-AVX1-NEXT: # xmm3 = mem[0,0]
 ; X32-AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm4
 ; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll
@@ -1373,7 +1373,7 @@
 define <16 x i16> @shuffle_v16i16_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31(<16 x i16> %a, <16 x i16> %b) {
 ; AVX1-LABEL: shuffle_v16i16_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vbroadcastss {{.*#+}} ymm2 = [9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41]
+; AVX1-NEXT: vbroadcastss {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX1-NEXT: vandnps %ymm1, %ymm2, %ymm1
 ; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
 ; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
@@ -1386,7 +1386,7 @@
 ;
 ; XOPAVX1-LABEL: shuffle_v16i16_00_17_02_19_04_21_06_23_08_25_10_27_12_29_14_31:
 ; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vbroadcastss {{.*#+}} ymm2 = [9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41]
+; XOPAVX1-NEXT: vbroadcastss {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,65535,65535]
 ; XOPAVX1-NEXT: vpcmov %ymm2, %ymm1, %ymm0, %ymm0
 ; XOPAVX1-NEXT: retq
 ;
@@ -1401,7 +1401,7 @@
 define <16 x i16> @shuffle_v16i16_16_01_18_03_20_05_22_07_24_09_26_11_28_13_30_15(<16 x i16> %a, <16 x i16> %b) {
 ; AVX1-LABEL: shuffle_v16i16_16_01_18_03_20_05_22_07_24_09_26_11_28_13_30_15:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vbroadcastss {{.*#+}} ymm2 = [9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41]
+; AVX1-NEXT: vbroadcastss {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,65535,65535]
 ; AVX1-NEXT: vandnps %ymm0, %ymm2, %ymm0
 ; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
 ; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
@@ -1414,7 +1414,7 @@
 ;
 ; XOPAVX1-LABEL: shuffle_v16i16_16_01_18_03_20_05_22_07_24_09_26_11_28_13_30_15:
 ; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vbroadcastss {{.*#+}} ymm2 = [9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41,9.18340949E-41]
+; XOPAVX1-NEXT: vbroadcastss {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,65535,65535]
 ; XOPAVX1-NEXT: vpcmov %ymm2, %ymm0, %ymm1, %ymm0
 ; XOPAVX1-NEXT: retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll
@@ -2276,7 +2276,7 @@
 define <32 x i8> @load_fold_pblendvb(<32 x i8>* %px, <32 x i8> %y) {
 ; AVX1-LABEL: load_fold_pblendvb:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vbroadcastsd {{.*#+}} ymm1 = [-5.4861292804117373E+303,-5.4861292804117373E+303,-5.4861292804117373E+303,-5.4861292804117373E+303]
+; AVX1-NEXT: vbroadcastsd {{.*#+}} ymm1 = [18374686483949879295,18374686483949879295,18374686483949879295,18374686483949879295]
 ; AVX1-NEXT: vandnps (%rdi), %ymm1, %ymm2
 ; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
 ; AVX1-NEXT: vorps %ymm2, %ymm0, %ymm0
@@ -2297,7 +2297,7 @@
 ;
 ; XOPAVX1-LABEL: load_fold_pblendvb:
 ; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vbroadcastsd {{.*#+}} ymm1 = [-5.4861292804117373E+303,-5.4861292804117373E+303,-5.4861292804117373E+303,-5.4861292804117373E+303]
+; XOPAVX1-NEXT: vbroadcastsd {{.*#+}} ymm1 = [18374686483949879295,18374686483949879295,18374686483949879295,18374686483949879295]
 ; XOPAVX1-NEXT: vpcmov %ymm1, (%rdi), %ymm0, %ymm0
 ; XOPAVX1-NEXT: retq
 ;
@@ -2314,7 +2314,7 @@
 define <32 x i8> @load_fold_pblendvb_commute(<32 x i8>* %px, <32 x i8> %y) {
 ; AVX1-LABEL: load_fold_pblendvb_commute:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vbroadcastsd {{.*#+}} ymm1 = [-5.4861292804117373E+303,-5.4861292804117373E+303,-5.4861292804117373E+303,-5.4861292804117373E+303]
+; AVX1-NEXT: vbroadcastsd {{.*#+}} ymm1 = [18374686483949879295,18374686483949879295,18374686483949879295,18374686483949879295]
 ; AVX1-NEXT: vandnps %ymm0, %ymm1, %ymm0
 ; AVX1-NEXT: vandps (%rdi), %ymm1, %ymm1
 ; AVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
@@ -2338,7 +2338,7 @@
 ; XOPAVX1-LABEL: load_fold_pblendvb_commute:
 ; XOPAVX1: # %bb.0:
 ; XOPAVX1-NEXT: vmovdqa (%rdi), %ymm1
-; XOPAVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [-5.4861292804117373E+303,-5.4861292804117373E+303,-5.4861292804117373E+303,-5.4861292804117373E+303]
+; XOPAVX1-NEXT: vbroadcastsd {{.*#+}} ymm2 = [18374686483949879295,18374686483949879295,18374686483949879295,18374686483949879295]
 ; XOPAVX1-NEXT: vpcmov %ymm2, %ymm0, %ymm1, %ymm0
 ; XOPAVX1-NEXT: retq
 ;