diff --git a/llvm/test/CodeGen/X86/WidenArith.ll b/llvm/test/CodeGen/X86/WidenArith.ll
--- a/llvm/test/CodeGen/X86/WidenArith.ll
+++ b/llvm/test/CodeGen/X86/WidenArith.ll
@@ -11,7 +11,7 @@
 ; X86-NEXT: vcmpltps %ymm1, %ymm0, %ymm0
 ; X86-NEXT: vcmpltps %ymm3, %ymm2, %ymm1
 ; X86-NEXT: vandps %ymm1, %ymm0, %ymm0
-; X86-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: test:
diff --git a/llvm/test/CodeGen/X86/addsub-constant-folding.ll b/llvm/test/CodeGen/X86/addsub-constant-folding.ll
--- a/llvm/test/CodeGen/X86/addsub-constant-folding.ll
+++ b/llvm/test/CodeGen/X86/addsub-constant-folding.ll
@@ -64,7 +64,7 @@
 define <4 x i32> @vec_add_const_add_const(<4 x i32> %arg) {
 ; X86-LABEL: vec_add_const_add_const:
 ; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: vec_add_const_add_const:
@@ -87,7 +87,7 @@
 ; X86-NEXT: paddd %xmm1, %xmm0
 ; X86-NEXT: calll vec_use@PLT
 ; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: addl $28, %esp
 ; X86-NEXT: .cfi_def_cfa_offset 4
 ; X86-NEXT: retl
@@ -115,7 +115,7 @@
 define <4 x i32> @vec_add_const_add_const_nonsplat(<4 x i32> %arg) {
 ; X86-LABEL: vec_add_const_add_const_nonsplat:
 ; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: vec_add_const_add_const_nonsplat:
@@ -186,7 +186,7 @@
 define <4 x i32> @vec_add_const_sub_const(<4 x i32> %arg) {
 ; X86-LABEL: vec_add_const_sub_const:
 ; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: vec_add_const_sub_const:
@@ -209,7 +209,7 @@
 ; X86-NEXT: paddd %xmm1, %xmm0
 ; X86-NEXT: calll vec_use@PLT
 ; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: addl $28, %esp
 ; X86-NEXT: .cfi_def_cfa_offset 4
 ; X86-NEXT: retl
@@ -237,7 +237,7 @@
 define <4 x i32> @vec_add_const_sub_const_nonsplat(<4 x i32> %arg) {
 ; X86-LABEL: vec_add_const_sub_const_nonsplat:
 ; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: vec_add_const_sub_const_nonsplat:
@@ -440,7 +440,7 @@
 define <4 x i32> @vec_sub_const_add_const(<4 x i32> %arg) {
 ; X86-LABEL: vec_sub_const_add_const:
 ; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: vec_sub_const_add_const:
@@ -458,10 +458,10 @@
 ; X86-NEXT: subl $28, %esp
 ; X86-NEXT: .cfi_def_cfa_offset 32
 ; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: calll vec_use@PLT
 ; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: addl $28, %esp
 ; X86-NEXT: .cfi_def_cfa_offset 4
 ; X86-NEXT: retl
@@ -487,7 +487,7 @@
 define <4 x i32> @vec_sub_const_add_const_nonsplat(<4 x i32> %arg) {
 ; X86-LABEL: vec_sub_const_add_const_nonsplat:
 ; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: vec_sub_const_add_const_nonsplat:
@@ -558,7 +558,7 @@
 define <4 x i32> @vec_sub_const_sub_const(<4 x i32> %arg) {
 ; X86-LABEL: vec_sub_const_sub_const:
 ; X86: # %bb.0:
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: vec_sub_const_sub_const:
@@ -576,10 +576,10 @@
 ; X86-NEXT: subl $28, %esp
 ; X86-NEXT: .cfi_def_cfa_offset 32
 ; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: calll vec_use@PLT
 ; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: addl $28, %esp
 ; X86-NEXT: .cfi_def_cfa_offset 4
 ; X86-NEXT: retl
@@ -605,7 +605,7 @@
 define <4 x i32> @vec_sub_const_sub_const_nonsplat(<4 x i32> %arg) {
 ; X86-LABEL: vec_sub_const_sub_const_nonsplat:
 ; X86: # %bb.0:
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: vec_sub_const_sub_const_nonsplat:
@@ -698,7 +698,7 @@
 ; X86: # %bb.0:
 ; X86-NEXT: subl $28, %esp
 ; X86-NEXT: .cfi_def_cfa_offset 32
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
 ; X86-NEXT: calll vec_use@PLT
 ; X86-NEXT: movdqa {{.*#+}} xmm0 = [2,2,2,2]
@@ -1074,7 +1074,7 @@
 define <4 x i32> @vec_const_sub_const_sub(<4 x i32> %arg) {
 ; X86-LABEL: vec_const_sub_const_sub:
 ; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: vec_const_sub_const_sub:
@@ -1126,7 +1126,7 @@
 define <4 x i32> @vec_const_sub_const_sub_nonsplat(<4 x i32> %arg) {
 ; X86-LABEL: vec_const_sub_const_sub_nonsplat:
 ; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: vec_const_sub_const_sub_nonsplat:
diff --git a/llvm/test/CodeGen/X86/atomic-fp.ll b/llvm/test/CodeGen/X86/atomic-fp.ll
--- a/llvm/test/CodeGen/X86/atomic-fp.ll
+++ b/llvm/test/CodeGen/X86/atomic-fp.ll
@@ -200,7 +200,7 @@
 ; X86-SSE1-NEXT: movl glob32, %eax
 ; X86-SSE1-NEXT: movl %eax, (%esp)
 ; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE1-NEXT: addss {{\.LCPI.*}}, %xmm0
+; X86-SSE1-NEXT: addss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
 ; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE1-NEXT: movl %eax, glob32
@@ -296,7 +296,7 @@
 ; X86-SSE2-NEXT: andl $-8, %esp
 ; X86-SSE2-NEXT: subl $8, %esp
 ; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE2-NEXT: addsd {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: addsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: movsd %xmm0, (%esp)
 ; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; X86-SSE2-NEXT: movlps %xmm0, glob64
@@ -311,7 +311,7 @@
 ; X86-AVX-NEXT: andl $-8, %esp
 ; X86-AVX-NEXT: subl $8, %esp
 ; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX-NEXT: vaddsd {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vaddsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
 ; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X86-AVX-NEXT: vmovlps %xmm0, glob64
@@ -361,7 +361,7 @@
 ; X86-SSE1-NEXT: movl -559038737, %eax
 ; X86-SSE1-NEXT: movl %eax, (%esp)
 ; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE1-NEXT: addss {{\.LCPI.*}}, %xmm0
+; X86-SSE1-NEXT: addss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
 ; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE1-NEXT: movl %eax, -559038737
@@ -459,7 +459,7 @@
 ; X86-SSE2-NEXT: andl $-8, %esp
 ; X86-SSE2-NEXT: subl $8, %esp
 ; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE2-NEXT: addsd {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: addsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: movsd %xmm0, (%esp)
 ; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; X86-SSE2-NEXT: movlps %xmm0, -559038737
@@ -474,7 +474,7 @@
 ; X86-AVX-NEXT: andl $-8, %esp
 ; X86-AVX-NEXT: subl $8, %esp
 ; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX-NEXT: vaddsd {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vaddsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
 ; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X86-AVX-NEXT: vmovlps %xmm0, -559038737
@@ -526,7 +526,7 @@
 ; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE1-NEXT: movl %eax, (%esp)
 ; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE1-NEXT: addss {{\.LCPI.*}}, %xmm0
+; X86-SSE1-NEXT: addss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
 ; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE1-NEXT: movl %eax, {{[0-9]+}}(%esp)
@@ -628,7 +628,7 @@
 ; X86-SSE2-NEXT: andl $-8, %esp
 ; X86-SSE2-NEXT: subl $16, %esp
 ; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE2-NEXT: addsd {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: addsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: movsd %xmm0, (%esp)
 ; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
 ; X86-SSE2-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
@@ -643,7 +643,7 @@
 ; X86-AVX-NEXT: andl $-8, %esp
 ; X86-AVX-NEXT: subl $16, %esp
 ; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX-NEXT: vaddsd {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vaddsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
 ; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X86-AVX-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
diff --git a/llvm/test/CodeGen/X86/avx-cmp.ll b/llvm/test/CodeGen/X86/avx-cmp.ll
--- a/llvm/test/CodeGen/X86/avx-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx-cmp.ll
@@ -49,7 +49,7 @@
 ; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1
 ; CHECK-NEXT: vmovsd (%rsp), %xmm0 # 8-byte Reload
 ; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: vucomisd {{\.LCPI.*}}, %xmm0
+; CHECK-NEXT: vucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; CHECK-NEXT: jne .LBB2_5
 ; CHECK-NEXT: jnp .LBB2_2
 ; CHECK-NEXT: .LBB2_5: # %if.then
diff --git a/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll b/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
--- a/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
@@ -645,8 +645,8 @@
 ; X86-AVX: # %bb.0:
 ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9]
-; X86-AVX-NEXT: vmovhpd {{\.LCPI.*}}, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: vmovhpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX-NEXT: # xmm1 = xmm1[0],mem[0]
 ; X86-AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x58,0xc1]
 ; X86-AVX-NEXT: vmovupd %xmm0, (%eax) # encoding: [0xc5,0xf9,0x11,0x00]
@@ -656,8 +656,8 @@
 ; X86-AVX512VL: # %bb.0:
 ; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
 ; X86-AVX512VL-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
-; X86-AVX512VL-NEXT: vmovhpd {{\.LCPI.*}}, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vmovhpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX512VL-NEXT: # xmm1 = xmm1[0],mem[0]
 ; X86-AVX512VL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
 ; X86-AVX512VL-NEXT: vmovupd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x11,0x00]
@@ -667,7 +667,7 @@
 ; X64-AVX: # %bb.0:
 ; X64-AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9]
 ; X64-AVX-NEXT: vmovhpd {{.*}}(%rip), %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: # xmm1 = xmm1[0],mem[0]
 ; X64-AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x58,0xc1]
 ; X64-AVX-NEXT: vmovupd %xmm0, (%rdi) # encoding: [0xc5,0xf9,0x11,0x07]
@@ -677,7 +677,7 @@
 ; X64-AVX512VL: # %bb.0:
 ; X64-AVX512VL-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
 ; X64-AVX512VL-NEXT: vmovhpd {{.*}}(%rip), %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: # xmm1 = xmm1[0],mem[0]
 ; X64-AVX512VL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
 ; X64-AVX512VL-NEXT: vmovupd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x11,0x07]
diff --git a/llvm/test/CodeGen/X86/avx2-arith.ll b/llvm/test/CodeGen/X86/avx2-arith.ll
--- a/llvm/test/CodeGen/X86/avx2-arith.ll
+++ b/llvm/test/CodeGen/X86/avx2-arith.ll
@@ -148,7 +148,7 @@
 ; X32-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
 ; X32-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; X32-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
 ; X32-NEXT: vextracti128 $1, %ymm0, %xmm1
 ; X32-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
 ; X32-NEXT: vzeroupper
@@ -303,7 +303,7 @@
 define <8 x i32> @mul_const6(<8 x i32> %x) {
 ; X32-LABEL: mul_const6:
 ; X32: # %bb.0:
-; X32-NEXT: vpmulld {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: mul_const6:
diff --git a/llvm/test/CodeGen/X86/avx2-conversions.ll b/llvm/test/CodeGen/X86/avx2-conversions.ll
--- a/llvm/test/CodeGen/X86/avx2-conversions.ll
+++ b/llvm/test/CodeGen/X86/avx2-conversions.ll
@@ -159,7 +159,7 @@
 define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
 ; X32-LABEL: trunc_16i16_16i8:
 ; X32: # %bb.0:
-; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
 ; X32-NEXT: vextracti128 $1, %ymm0, %xmm1
 ; X32-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
 ; X32-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/avx2-intrinsics-x86.ll b/llvm/test/CodeGen/X86/avx2-intrinsics-x86.ll
--- a/llvm/test/CodeGen/X86/avx2-intrinsics-x86.ll
+++ b/llvm/test/CodeGen/X86/avx2-intrinsics-x86.ll
@@ -25,28 +25,28 @@
 ; X86-AVX: # %bb.0:
 ; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
 ; X86-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX-NEXT: retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_packssdw_fold:
 ; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovaps {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
+; X86-AVX512VL-NEXT: vmovaps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
 ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_packssdw_fold:
 ; X64-AVX: # %bb.0:
 ; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
 ; X64-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: retq # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_packssdw_fold:
 ; X64-AVX512VL: # %bb.0:
 ; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
 ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
 %res = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> zeroinitializer, <8 x i32> )
 ret <16 x i16> %res
@@ -74,28 +74,28 @@
 ; X86-AVX: # %bb.0:
 ; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
 ; X86-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX-NEXT: retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_packsswb_fold:
 ; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovaps {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
+; X86-AVX512VL-NEXT: vmovaps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
 ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_packsswb_fold:
 ; X64-AVX: # %bb.0:
 ; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
 ; X64-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: retq # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_packsswb_fold:
 ; X64-AVX512VL: # %bb.0:
 ; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
 ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
 %res = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> , <16 x i16> zeroinitializer)
 ret <32 x i8> %res
@@ -123,28 +123,28 @@
 ; X86-AVX: # %bb.0:
 ; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; X86-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX-NEXT: retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_packuswb_fold:
 ; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovaps {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; X86-AVX512VL-NEXT: vmovaps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_packuswb_fold:
 ; X64-AVX: # %bb.0:
 ; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; X64-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: retq # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_packuswb_fold:
 ; X64-AVX512VL: # %bb.0:
 ; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
 ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
 %res = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> , <16 x i16> zeroinitializer)
 ret <32 x i8> %res
@@ -753,28 +753,28 @@
 ; X86-AVX: # %bb.0:
 ; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
 ; X86-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX-NEXT: retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_packusdw_fold:
 ; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovaps {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
+; X86-AVX512VL-NEXT: vmovaps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
 ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_packusdw_fold:
 ; X64-AVX: # %bb.0:
 ; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
 ; X64-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: retq # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_packusdw_fold:
 ; X64-AVX512VL: # %bb.0:
 ; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
 ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
 %res = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> zeroinitializer, <8 x i32> )
 ret <16 x i16> %res
@@ -1025,26 +1025,26 @@
 ; X86-AVX: # %bb.0:
 ; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,0,4294967295]
 ; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsllvd {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [1,1,1,4294967295]
 ; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX-NEXT: vpsllvd %xmm1, %xmm1, %xmm1 # encoding: [0xc4,0xe2,0x71,0x47,0xc9]
 ; X86-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0xc1]
 ; X86-AVX-NEXT: retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_psllv_d_const:
 ; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295]
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295]
 ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsllvd {{\.LCPI.*}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm1 # EVEX TO VEX Compression xmm1 = [1,1,1,4294967295]
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 # EVEX TO VEX Compression xmm1 = [1,1,1,4294967295]
 ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX512VL-NEXT: vpsllvd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x47,0xc9]
 ; X86-AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
 ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
@@ -1053,12 +1053,12 @@
 ; X64-AVX: # %bb.0:
 ; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,0,4294967295]
 ; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [1,1,1,4294967295]
 ; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vpsllvd %xmm1, %xmm1, %xmm1 # encoding: [0xc4,0xe2,0x71,0x47,0xc9]
 ; X64-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0xc1]
 ; X64-AVX-NEXT: retq # encoding: [0xc3]
@@ -1067,12 +1067,12 @@
 ; X64-AVX512VL: # %bb.0:
 ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295]
 ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm1 # EVEX TO VEX Compression xmm1 = [1,1,1,4294967295]
 ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vpsllvd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x47,0xc9]
 ; X64-AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
@@ -1103,29 +1103,29 @@
 ; X86-AVX: # %bb.0:
 ; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
 ; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsllvd {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4294967295]
 ; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsllvd {{\.LCPI.*}}, %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0xc1]
 ; X86-AVX-NEXT: retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_psllv_d_256_const:
 ; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
 ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsllvd {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295]
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295]
 ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsllvd {{\.LCPI.*}}, %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX512VL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
 ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
 ;
@@ -1133,14 +1133,14 @@
 ; X64-AVX: # %bb.0:
 ; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
 ; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4294967295]
 ; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vpsllvd {{.*}}(%rip), %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0xc1]
 ; X64-AVX-NEXT: retq # encoding: [0xc3]
 ;
@@ -1148,14 +1148,14 @@
 ; X64-AVX512VL: # %bb.0:
 ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
 ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295]
 ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vpsllvd {{.*}}(%rip), %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
 %res0 = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> , <8 x i32> )
@@ -1184,36 +1184,36 @@
 ; X86-AVX: # %bb.0:
 ; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4,0,4294967295,4294967295]
 ; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsllvq {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX-NEXT: retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_psllv_q_const:
 ; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,0,4294967295,4294967295]
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,0,4294967295,4294967295]
 ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsllvq {{\.LCPI.*}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_psllv_q_const:
 ; X64-AVX: # %bb.0:
 ; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4,18446744073709551615]
 ; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: retq # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_psllv_q_const:
 ; X64-AVX512VL: # %bb.0:
 ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [4,18446744073709551615]
 ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
 %res = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> , <2 x i64> )
 ret <2 x i64> %res
@@ -1240,36 +1240,36 @@
 ; X86-AVX: # %bb.0:
 ; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [4,0,4,0,4,0,4294967295,4294967295]
 ; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsllvq {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX-NEXT: retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_psllv_q_256_const:
 ; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,0,4,0,4,0,4294967295,4294967295]
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,0,4,0,4,0,4294967295,4294967295]
 ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsllvq {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_psllv_q_256_const:
 ; X64-AVX: # %bb.0:
 ; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [4,4,4,18446744073709551615]
 ; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: retq # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_psllv_q_256_const:
 ; X64-AVX512VL: # %bb.0:
 ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,18446744073709551615]
 ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
 %res = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> , <4 x i64> )
 ret <4 x i64> %res
@@ -1296,29 +1296,29 @@
 ; X86-AVX: # %bb.0:
 ; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,0,4294967295]
 ; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsrlvd {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [4,4,4,4294967295]
 ; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsrlvd {{\.LCPI.*}}, %xmm1, %xmm1 # encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 # encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0xc1]
 ; X86-AVX-NEXT: retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_d_const:
 ; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295]
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295]
 ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI.*}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm1 # EVEX TO VEX Compression xmm1 = [4,4,4,4294967295]
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 # EVEX TO VEX Compression xmm1 = [4,4,4,4294967295]
 ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI.*}}, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
 ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
 ;
@@ -1326,14 +1326,14 @@
 ; X64-AVX: # %bb.0:
 ; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,0,4294967295]
 ; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [4,4,4,4294967295]
 ; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm1, %xmm1 # encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0xc1]
 ; X64-AVX-NEXT: retq # encoding: [0xc3]
 ;
@@ -1341,14 +1341,14 @@
 ; X64-AVX512VL: # %bb.0:
 ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295]
 ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm1 # EVEX TO VEX Compression xmm1 = [4,4,4,4294967295]
 ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
 %res0 = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> , <4 x i32> )
@@ -1378,29 +1378,29 @@
 ; X86-AVX: # %bb.0:
 ; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
 ; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsrlvd {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4294967295]
 ; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsrlvd {{\.LCPI.*}}, %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0xc1]
 ; X86-AVX-NEXT: retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_d_256_const:
 ; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
 ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295]
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295]
 ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI.*}}, %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX512VL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
 ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
 ;
@@ -1408,14 +1408,14 @@
 ; X64-AVX: # %bb.0:
 ; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
 ; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4294967295]
 ; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vpsrlvd {{.*}}(%rip), %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0xc1]
 ; X64-AVX-NEXT: retq # encoding: [0xc3]
 ;
@@ -1423,14 +1423,14 @@
 ; X64-AVX512VL: # %bb.0:
 ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
 ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295]
 ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
 %res0 = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> , <8 x i32> )
@@ -1460,36 +1460,36 @@
 ; X86-AVX: # %bb.0:
 ; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4,0,4,0]
 ; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsrlvq {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX-NEXT: retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_q_const:
 ; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,0,4,0]
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,0,4,0]
 ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsrlvq {{\.LCPI.*}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_psrlv_q_const:
 ; X64-AVX: # %bb.0:
 ; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4,4]
 ; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: retq # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_psrlv_q_const:
 ; X64-AVX512VL: # %bb.0:
 ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [4,4]
 ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
 %res = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> , <2 x i64> )
 ret <2 x i64> %res
@@ -1517,36 +1517,36 @@
 ; X86-AVX: # %bb.0:
 ; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [4,0,4,0,4,0,4,0]
 ; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsrlvq {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX-NEXT: retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_q_256_const:
 ; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,0,4,0,4,0,4,0]
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,0,4,0,4,0,4,0]
 ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsrlvq {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_psrlv_q_256_const:
 ; X64-AVX: # %bb.0:
 ; X64-AVX-NEXT: vpbroadcastq {{.*#+}} ymm0 = [4,4,4,4]
 ; X64-AVX-NEXT: # encoding: [0xc4,0xe2,0x7d,0x59,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: retq # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: test_x86_avx2_psrlv_q_256_const:
 ; X64-AVX512VL: # %bb.0:
 ; X64-AVX512VL-NEXT: vpbroadcastq {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4]
 ; X64-AVX512VL-NEXT: # encoding: [0xc4,0xe2,0x7d,0x59,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
 %res = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> , <4 x i64> )
 ret <4 x i64> %res
@@ -1573,36 +1573,36 @@
 ; X86-AVX: # %bb.0:
 ; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,4294967284,23]
 ; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsravd {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsravd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX-NEXT: retl # encoding: [0xc3]
 ;
 ; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_const:
 ; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
 ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsravd {{\.LCPI.*}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsravd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: test_x86_avx2_psrav_d_const:
 ; X64-AVX: # %bb.0:
 ; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,4294967284,23]
 ; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX-NEXT: # 
fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A] -; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: retq # encoding: [0xc3] ; ; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_const: ; X64-AVX512VL: # %bb.0: ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,4294967284,23] ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] -; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A] -; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: retq # encoding: [0xc3] %res = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> , <4 x i32> ) ret <4 x i32> %res @@ -1628,36 +1628,36 @@ ; X86-AVX: # %bb.0: ; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51] ; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] -; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 -; X86-AVX-NEXT: vpsravd {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A] -; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 +; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 +; X86-AVX-NEXT: vpsravd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A] +; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 ; X86-AVX-NEXT: retl # encoding: [0xc3] ; ; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const: ; X86-AVX512VL: # %bb.0: -; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51] +; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51] ; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] -; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 -; X86-AVX512VL-NEXT: vpsravd {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A] -; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 +; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 +; X86-AVX512VL-NEXT: vpsravd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A] +; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 ; X86-AVX512VL-NEXT: retl # encoding: [0xc3] ; ; X64-AVX-LABEL: test_x86_avx2_psrav_d_256_const: ; X64-AVX: # %bb.0: ; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51] ; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] -; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; 
X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A] -; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-AVX-NEXT: retq # encoding: [0xc3] ; ; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const: ; X64-AVX512VL: # %bb.0: ; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51] ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] -; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A] -; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-AVX512VL-NEXT: retq # encoding: [0xc3] %res = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> , <8 x i32> ) ret <8 x i32> %res diff --git a/llvm/test/CodeGen/X86/avx2-nontemporal.ll b/llvm/test/CodeGen/X86/avx2-nontemporal.ll --- a/llvm/test/CodeGen/X86/avx2-nontemporal.ll +++ b/llvm/test/CodeGen/X86/avx2-nontemporal.ll @@ -15,21 +15,21 @@ ; X32-NEXT: movl 8(%ebp), %ecx ; X32-NEXT: movl 136(%ebp), %edx ; X32-NEXT: movl (%edx), %eax -; X32-NEXT: vaddps {{\.LCPI.*}}, %ymm0, %ymm0 +; X32-NEXT: vaddps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X32-NEXT: vmovntps %ymm0, (%ecx) -; X32-NEXT: vpaddq {{\.LCPI.*}}, %ymm2, %ymm0 +; X32-NEXT: vpaddq {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm0 ; X32-NEXT: addl (%edx), %eax ; X32-NEXT: vmovntdq %ymm0, (%ecx) -; X32-NEXT: vaddpd {{\.LCPI.*}}, %ymm1, %ymm0 +; X32-NEXT: vaddpd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0 ; X32-NEXT: addl (%edx), %eax ; X32-NEXT: vmovntpd %ymm0, (%ecx) -; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm5, %ymm0 +; X32-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}, %ymm5, %ymm0 ; X32-NEXT: addl (%edx), %eax ; X32-NEXT: vmovntdq %ymm0, (%ecx) -; X32-NEXT: vpaddw {{\.LCPI.*}}, %ymm4, %ymm0 +; X32-NEXT: vpaddw {{\.LCPI[0-9]+_[0-9]+}}, %ymm4, %ymm0 ; X32-NEXT: addl (%edx), %eax ; X32-NEXT: vmovntdq %ymm0, (%ecx) -; X32-NEXT: vpaddb {{\.LCPI.*}}, %ymm3, %ymm0 +; X32-NEXT: vpaddb {{\.LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm0 ; X32-NEXT: addl (%edx), %eax ; X32-NEXT: vmovntdq %ymm0, (%ecx) ; X32-NEXT: movl %ebp, %esp diff --git a/llvm/test/CodeGen/X86/avx2-shift.ll b/llvm/test/CodeGen/X86/avx2-shift.ll --- a/llvm/test/CodeGen/X86/avx2-shift.ll +++ b/llvm/test/CodeGen/X86/avx2-shift.ll @@ -424,7 +424,7 @@ ; X32-LABEL: shl9: ; X32: # %bb.0: ; X32-NEXT: vpsllw $3, %ymm0, %ymm0 -; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0 +; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: shl9: @@ -440,7 +440,7 @@ ; X32-LABEL: shr9: ; X32: # %bb.0: ; X32-NEXT: vpsrlw $3, %ymm0, %ymm0 -; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0 +; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: shr9: @@ -472,7 +472,7 @@ ; X32-LABEL: sra_v32i8: ; X32: # %bb.0: ; X32-NEXT: vpsrlw $3, %ymm0, %ymm0 -; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0 +; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X32-NEXT: vmovdqa {{.*#+}} ymm1 = 
[16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] ; X32-NEXT: vpxor %ymm1, %ymm0, %ymm0 ; X32-NEXT: vpsubb %ymm1, %ymm0, %ymm0 diff --git a/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll b/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll --- a/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll +++ b/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll @@ -7,7 +7,7 @@ ; X32: # %bb.0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] -; X32-NEXT: vaddpd {{\.LCPI.*}}, %ymm0, %ymm0 +; X32-NEXT: vaddpd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: test_broadcast_2f64_4f64: @@ -26,7 +26,7 @@ ; X32: # %bb.0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1] -; X32-NEXT: vpaddq {{\.LCPI.*}}, %ymm0, %ymm0 +; X32-NEXT: vpaddq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: test_broadcast_2i64_4i64: @@ -45,7 +45,7 @@ ; X32: # %bb.0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] -; X32-NEXT: vaddps {{\.LCPI.*}}, %ymm0, %ymm0 +; X32-NEXT: vaddps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: test_broadcast_4f32_8f32: @@ -64,7 +64,7 @@ ; X32: # %bb.0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1] -; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0 +; X32-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: test_broadcast_4i32_8i32: @@ -83,7 +83,7 @@ ; X32: # %bb.0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1] -; X32-NEXT: vpaddw {{\.LCPI.*}}, %ymm0, %ymm0 +; X32-NEXT: vpaddw {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: test_broadcast_8i16_16i16: @@ -102,7 +102,7 @@ ; X32: # %bb.0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1] -; X32-NEXT: vpaddb {{\.LCPI.*}}, %ymm0, %ymm0 +; X32-NEXT: vpaddb {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: test_broadcast_16i8_32i8: @@ -122,7 +122,7 @@ ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1] -; X32-NEXT: vaddpd {{\.LCPI.*}}, %ymm1, %ymm0 +; X32-NEXT: vaddpd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0 ; X32-NEXT: vmovapd %xmm1, (%eax) ; X32-NEXT: retl ; @@ -145,7 +145,7 @@ ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] -; X32-NEXT: vpaddq {{\.LCPI.*}}, %ymm1, %ymm0 +; X32-NEXT: vpaddq {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0 ; X32-NEXT: vmovdqa %xmm1, (%eax) ; X32-NEXT: retl ; @@ -168,7 +168,7 @@ ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1] -; X32-NEXT: vaddps {{\.LCPI.*}}, %ymm1, %ymm0 +; X32-NEXT: vaddps {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0 ; X32-NEXT: vmovaps %xmm1, (%eax) ; X32-NEXT: retl ; @@ -191,7 +191,7 @@ ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] -; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm1, %ymm0 +; X32-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0 ; X32-NEXT: vmovdqa %xmm1, (%eax) ; X32-NEXT: retl ; @@ -214,7 +214,7 @@ ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] 
-; X32-NEXT: vpaddw {{\.LCPI.*}}, %ymm1, %ymm0 +; X32-NEXT: vpaddw {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0 ; X32-NEXT: vmovdqa %xmm1, (%eax) ; X32-NEXT: retl ; @@ -237,7 +237,7 @@ ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1] -; X32-NEXT: vpaddb {{\.LCPI.*}}, %ymm1, %ymm0 +; X32-NEXT: vpaddb {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0 ; X32-NEXT: vmovdqa %xmm1, (%eax) ; X32-NEXT: retl ; diff --git a/llvm/test/CodeGen/X86/avx2-vector-shifts.ll b/llvm/test/CodeGen/X86/avx2-vector-shifts.ll --- a/llvm/test/CodeGen/X86/avx2-vector-shifts.ll +++ b/llvm/test/CodeGen/X86/avx2-vector-shifts.ll @@ -486,10 +486,10 @@ ; X32: # %bb.0: ; X32-NEXT: vpsllw $5, %ymm1, %ymm1 ; X32-NEXT: vpsllw $4, %ymm0, %ymm2 -; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2 +; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2 ; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; X32-NEXT: vpsllw $2, %ymm0, %ymm2 -; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2 +; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2 ; X32-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; X32-NEXT: vpaddb %ymm0, %ymm0, %ymm2 @@ -692,14 +692,14 @@ ; X32: # %bb.0: ; X32-NEXT: vpsllw $5, %ymm1, %ymm1 ; X32-NEXT: vpsrlw $4, %ymm0, %ymm2 -; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2 +; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2 ; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; X32-NEXT: vpsrlw $2, %ymm0, %ymm2 -; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2 +; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2 ; X32-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; X32-NEXT: vpsrlw $1, %ymm0, %ymm2 -; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2 +; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2 ; X32-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; X32-NEXT: retl diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll --- a/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll +++ b/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll @@ -1840,7 +1840,7 @@ ; X86-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero ; X86-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1 ; X86-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] -; X86-NEXT: vsubpd {{\.LCPI.*}}, %xmm1, %xmm1 +; X86-NEXT: vsubpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 ; X86-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0] ; X86-NEXT: vaddsd %xmm1, %xmm2, %xmm1 ; X86-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1] @@ -1888,7 +1888,7 @@ ; X86-NEXT: vmovq %xmm1, {{[0-9]+}}(%esp) ; X86-NEXT: shrl $31, %eax ; X86-NEXT: fildll {{[0-9]+}}(%esp) -; X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4) +; X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4) ; X86-NEXT: fstps {{[0-9]+}}(%esp) ; X86-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero ; X86-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] @@ -3118,7 +3118,7 @@ define <8 x double> @test_mm512_fmsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { ; X86-LABEL: test_mm512_fmsub_round_pd: ; X86: # %bb.0: # %entry -; X86-NEXT: vpxorq {{\.LCPI.*}}{1to8}, %zmm2, %zmm2 +; X86-NEXT: vpxorq {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %zmm2, %zmm2 ; X86-NEXT: vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ; X86-NEXT: retl ; @@ -3178,7 +3178,7 @@ define <8 x double> @test_mm512_fnmadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { ; X86-LABEL: test_mm512_fnmadd_round_pd: ; X86: # 
%bb.0: # %entry -; X86-NEXT: vpxorq {{\.LCPI.*}}{1to8}, %zmm0, %zmm0 +; X86-NEXT: vpxorq {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %zmm0, %zmm0 ; X86-NEXT: vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ; X86-NEXT: retl ; @@ -3349,7 +3349,7 @@ define <8 x double> @test_mm512_fmsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { ; X86-LABEL: test_mm512_fmsub_pd: ; X86: # %bb.0: # %entry -; X86-NEXT: vpxorq {{\.LCPI.*}}{1to8}, %zmm2, %zmm2 +; X86-NEXT: vpxorq {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %zmm2, %zmm2 ; X86-NEXT: vfmadd213pd {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2 ; X86-NEXT: retl ; @@ -3409,7 +3409,7 @@ define <8 x double> @test_mm512_fnmadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { ; X86-LABEL: test_mm512_fnmadd_pd: ; X86: # %bb.0: # %entry -; X86-NEXT: vpxorq {{\.LCPI.*}}{1to8}, %zmm0, %zmm0 +; X86-NEXT: vpxorq {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %zmm0, %zmm0 ; X86-NEXT: vfmadd213pd {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2 ; X86-NEXT: retl ; @@ -3582,7 +3582,7 @@ define <16 x float> @test_mm512_fmsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { ; X86-LABEL: test_mm512_fmsub_round_ps: ; X86: # %bb.0: # %entry -; X86-NEXT: vpxord {{\.LCPI.*}}{1to16}, %zmm2, %zmm2 +; X86-NEXT: vpxord {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm2, %zmm2 ; X86-NEXT: vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ; X86-NEXT: retl ; @@ -3642,7 +3642,7 @@ define <16 x float> @test_mm512_fnmadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { ; X86-LABEL: test_mm512_fnmadd_round_ps: ; X86: # %bb.0: # %entry -; X86-NEXT: vpxord {{\.LCPI.*}}{1to16}, %zmm0, %zmm0 +; X86-NEXT: vpxord {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0 ; X86-NEXT: vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ; X86-NEXT: retl ; @@ -3813,7 +3813,7 @@ define <16 x float> @test_mm512_fmsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { ; X86-LABEL: test_mm512_fmsub_ps: ; X86: # %bb.0: # %entry -; X86-NEXT: vpxord {{\.LCPI.*}}{1to16}, %zmm2, %zmm2 +; X86-NEXT: vpxord {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm2, %zmm2 ; X86-NEXT: vfmadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2 ; X86-NEXT: retl ; @@ -3873,7 +3873,7 @@ define <16 x float> @test_mm512_fnmadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { ; X86-LABEL: test_mm512_fnmadd_ps: ; X86: # %bb.0: # %entry -; X86-NEXT: vpxord {{\.LCPI.*}}{1to16}, %zmm0, %zmm0 +; X86-NEXT: vpxord {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0 ; X86-NEXT: vfmadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2 ; X86-NEXT: retl ; @@ -4046,7 +4046,7 @@ define <8 x double> @test_mm512_fmsubadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) { ; X86-LABEL: test_mm512_fmsubadd_round_pd: ; X86: # %bb.0: # %entry -; X86-NEXT: vpxorq {{\.LCPI.*}}{1to8}, %zmm2, %zmm2 +; X86-NEXT: vpxorq {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %zmm2, %zmm2 ; X86-NEXT: vfmaddsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0 ; X86-NEXT: retl ; @@ -4323,7 +4323,7 @@ define <16 x float> @test_mm512_fmsubadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) { ; X86-LABEL: test_mm512_fmsubadd_round_ps: ; X86: # %bb.0: # %entry -; X86-NEXT: vpxord {{\.LCPI.*}}{1to16}, %zmm2, %zmm2 +; X86-NEXT: vpxord {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm2, %zmm2 ; X86-NEXT: vfmaddsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0 ; X86-NEXT: retl ; diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics.ll b/llvm/test/CodeGen/X86/avx512-intrinsics.ll --- a/llvm/test/CodeGen/X86/avx512-intrinsics.ll +++ b/llvm/test/CodeGen/X86/avx512-intrinsics.ll @@ -7118,9 +7118,9 @@ ; X86-LABEL: test_x86_avx512_psllv_d_512_const: ; 
X86: # %bb.0: ; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,9,0,4294967295,3,7,4294967295,0,4,5,4294967294,0,5,3,4294967293,0] -; X86-NEXT: vpsllvd {{\.LCPI.*}}, %zmm0, %zmm0 +; X86-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 ; X86-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4294967295] -; X86-NEXT: vpsllvd {{\.LCPI.*}}, %zmm1, %zmm1 +; X86-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm1 ; X86-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; X86-NEXT: retl %res0 = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> , <16 x i32> ) @@ -7191,9 +7191,9 @@ ; X86-LABEL: test_x86_avx512_psllv_q_512_const: ; X86: # %bb.0: ; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,0,9,0,0,0,4294967295,4294967295,3,0,7,0,4294967295,4294967295,0,0] -; X86-NEXT: vpsllvq {{\.LCPI.*}}, %zmm0, %zmm0 +; X86-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 ; X86-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,0,4,0,4,0,4,0,4,0,4,0,4,0,4294967295,4294967295] -; X86-NEXT: vpsllvq {{\.LCPI.*}}, %zmm1, %zmm1 +; X86-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm1 ; X86-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; X86-NEXT: retl %res0 = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> , <8 x i64> ) @@ -7366,9 +7366,9 @@ ; X86-LABEL: test_x86_avx512_psrlv_d_512_const: ; X86: # %bb.0: ; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,9,0,4294967295,3,7,4294967295,0,4,5,4294967294,0,5,3,4294967293,0] -; X86-NEXT: vpsrlvd {{\.LCPI.*}}, %zmm0, %zmm0 +; X86-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 ; X86-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4294967295] -; X86-NEXT: vpsrlvd {{\.LCPI.*}}, %zmm1, %zmm1 +; X86-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm1 ; X86-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; X86-NEXT: retl %res0 = call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> , <16 x i32> ) @@ -7439,9 +7439,9 @@ ; X86-LABEL: test_x86_avx512_psrlv_q_512_const: ; X86: # %bb.0: ; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,0,9,0,0,0,4294967295,4294967295,3,0,7,0,4294967295,4294967295,0,0] -; X86-NEXT: vpsrlvq {{\.LCPI.*}}, %zmm0, %zmm0 +; X86-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 ; X86-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,0,4,0,4,0,4,0,4,0,4,0,4,0,4294967295,4294967295] -; X86-NEXT: vpsrlvq {{\.LCPI.*}}, %zmm1, %zmm1 +; X86-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm1 ; X86-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; X86-NEXT: retl %res0 = call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> , <8 x i64> ) diff --git a/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll b/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll --- a/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll +++ b/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll @@ -1292,18 +1292,18 @@ ; X86: # %bb.0: ; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535] ; X86-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A] -; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 -; X86-NEXT: vpsrlvw {{\.LCPI.*}}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x10,0x05,A,A,A,A] -; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 +; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 +; X86-NEXT: vpsrlvw {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x10,0x05,A,A,A,A] +; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_x86_avx512_psrlv_w_512_const: ; X64: # %bb.0: ; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = 
[4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535] ; X64-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A] -; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x10,0x05,A,A,A,A] -; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-NEXT: retq # encoding: [0xc3] %res1 = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> , <32 x i16> ) ret <32 x i16> %res1 @@ -1410,18 +1410,18 @@ ; X86: # %bb.0: ; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51] ; X86-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A] -; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 -; X86-NEXT: vpsravw {{\.LCPI.*}}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x11,0x05,A,A,A,A] -; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 +; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 +; X86-NEXT: vpsravw {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x11,0x05,A,A,A,A] +; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_mask_psrav32_hi_const: ; X64: # %bb.0: ; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51] ; X64-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A] -; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-NEXT: vpsravw {{.*}}(%rip), %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x11,0x05,A,A,A,A] -; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-NEXT: retq # encoding: [0xc3] %1 = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> , <32 x i16> ) ret <32 x i16> %1 @@ -1575,18 +1575,18 @@ ; X86: # %bb.0: ; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535] ; X86-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A] -; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 -; X86-NEXT: vpsllvw {{\.LCPI.*}}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x12,0x05,A,A,A,A] -; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 +; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 +; X86-NEXT: vpsllvw {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x12,0x05,A,A,A,A] +; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_x86_avx512_psllv_w_512_const: ; X64: # %bb.0: ; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535] ; X64-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A] -; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-NEXT: # 
fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x12,0x05,A,A,A,A] -; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-NEXT: retq # encoding: [0xc3] %res1 = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> , <32 x i16> ) ret <32 x i16> %res1 diff --git a/llvm/test/CodeGen/X86/avx512bwvl-intrinsics.ll b/llvm/test/CodeGen/X86/avx512bwvl-intrinsics.ll --- a/llvm/test/CodeGen/X86/avx512bwvl-intrinsics.ll +++ b/llvm/test/CodeGen/X86/avx512bwvl-intrinsics.ll @@ -2153,20 +2153,20 @@ define <8 x i16> @test_int_x86_avx512_psrlv_w_128_const() optsize { ; X86-LABEL: test_int_x86_avx512_psrlv_w_128_const: ; X86: # %bb.0: -; X86-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535] +; X86-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535] ; X86-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] -; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 -; X86-NEXT: vpsrlvw {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x10,0x05,A,A,A,A] -; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 +; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 +; X86-NEXT: vpsrlvw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x10,0x05,A,A,A,A] +; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_psrlv_w_128_const: ; X64: # %bb.0: ; X64-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535] ; X64-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] -; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-NEXT: vpsrlvw {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x10,0x05,A,A,A,A] -; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-NEXT: retq # encoding: [0xc3] %res = call <8 x i16> @llvm.x86.avx512.psrlv.w.128(<8 x i16> , <8 x i16> ) ret <8 x i16> %res @@ -2177,20 +2177,20 @@ define <16 x i16> @test_int_x86_avx512_psrlv_w_256_const() optsize { ; X86-LABEL: test_int_x86_avx512_psrlv_w_256_const: ; X86: # %bb.0: -; X86-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535] +; X86-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535] ; X86-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] -; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 -; X86-NEXT: vpsrlvw {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x10,0x05,A,A,A,A] -; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 +; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 +; X86-NEXT: vpsrlvw {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x10,0x05,A,A,A,A] +; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: 
test_int_x86_avx512_psrlv_w_256_const: ; X64: # %bb.0: ; X64-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535] ; X64-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] -; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-NEXT: vpsrlvw {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x10,0x05,A,A,A,A] -; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-NEXT: retq # encoding: [0xc3] %res = call <16 x i16> @llvm.x86.avx512.psrlv.w.256(<16 x i16> , <16 x i16> ) ret <16 x i16> %res @@ -2397,20 +2397,20 @@ define <8 x i16> @test_int_x86_avx512_psllv_w_128_const() optsize { ; X86-LABEL: test_int_x86_avx512_psllv_w_128_const: ; X86: # %bb.0: -; X86-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535] +; X86-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535] ; X86-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] -; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 -; X86-NEXT: vpsllvw {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x12,0x05,A,A,A,A] -; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 +; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 +; X86-NEXT: vpsllvw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x12,0x05,A,A,A,A] +; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_psllv_w_128_const: ; X64: # %bb.0: ; X64-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535] ; X64-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] -; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-NEXT: vpsllvw {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x12,0x05,A,A,A,A] -; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-NEXT: retq # encoding: [0xc3] %res = call <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16> , <8 x i16> ) ret <8 x i16> %res @@ -2422,20 +2422,20 @@ define <16 x i16> @test_int_x86_avx512_psllv_w_256_const() optsize { ; X86-LABEL: test_int_x86_avx512_psllv_w_256_const: ; X86: # %bb.0: -; X86-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535] +; X86-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535] ; X86-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] -; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 -; X86-NEXT: vpsllvw {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x12,0x05,A,A,A,A] -; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 +; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 +; X86-NEXT: vpsllvw {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x12,0x05,A,A,A,A] +; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, 
kind: FK_Data_4 ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_psllv_w_256_const: ; X64: # %bb.0: ; X64-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535] ; X64-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] -; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-NEXT: vpsllvw {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x12,0x05,A,A,A,A] -; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-NEXT: retq # encoding: [0xc3] %res = call <16 x i16> @llvm.x86.avx512.psllv.w.256(<16 x i16> , <16 x i16> ) ret <16 x i16> %res diff --git a/llvm/test/CodeGen/X86/avx512vbmi2-funnel-shifts.ll b/llvm/test/CodeGen/X86/avx512vbmi2-funnel-shifts.ll --- a/llvm/test/CodeGen/X86/avx512vbmi2-funnel-shifts.ll +++ b/llvm/test/CodeGen/X86/avx512vbmi2-funnel-shifts.ll @@ -5,7 +5,7 @@ define <8 x i64> @avx512_funnel_shift_q_512(<8 x i64> %a0, <8 x i64> %a1) { ; X86-LABEL: avx512_funnel_shift_q_512: ; X86: # %bb.0: -; X86-NEXT: vpshldvq {{\.LCPI.*}}, %zmm1, %zmm0 +; X86-NEXT: vpshldvq {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm0 ; X86-NEXT: retl ; ; X64-LABEL: avx512_funnel_shift_q_512: @@ -32,7 +32,7 @@ define <16 x i32> @avx512_funnel_shift_d_512(<16 x i32> %a0, <16 x i32> %a1) { ; X86-LABEL: avx512_funnel_shift_d_512: ; X86: # %bb.0: -; X86-NEXT: vpshldvd {{\.LCPI.*}}, %zmm1, %zmm0 +; X86-NEXT: vpshldvd {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm0 ; X86-NEXT: retl ; ; X64-LABEL: avx512_funnel_shift_d_512: @@ -59,7 +59,7 @@ define <32 x i16> @avx512_funnel_shift_w_512(<32 x i16> %a0, <32 x i16> %a1) { ; X86-LABEL: avx512_funnel_shift_w_512: ; X86: # %bb.0: -; X86-NEXT: vpshldvw {{\.LCPI.*}}, %zmm1, %zmm0 +; X86-NEXT: vpshldvw {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm0 ; X86-NEXT: retl ; ; X64-LABEL: avx512_funnel_shift_w_512: diff --git a/llvm/test/CodeGen/X86/avx512vbmi2vl-funnel-shifts.ll b/llvm/test/CodeGen/X86/avx512vbmi2vl-funnel-shifts.ll --- a/llvm/test/CodeGen/X86/avx512vbmi2vl-funnel-shifts.ll +++ b/llvm/test/CodeGen/X86/avx512vbmi2vl-funnel-shifts.ll @@ -5,7 +5,7 @@ define <2 x i64> @avx512_funnel_shift_q_128(<2 x i64> %a0, <2 x i64> %a1) { ; X86-LABEL: avx512_funnel_shift_q_128: ; X86: # %bb.0: -; X86-NEXT: vpshldvq {{\.LCPI.*}}, %xmm1, %xmm0 +; X86-NEXT: vpshldvq {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm0 ; X86-NEXT: retl ; ; X64-LABEL: avx512_funnel_shift_q_128: @@ -21,7 +21,7 @@ define <4 x i64> @avx512_funnel_shift_q_256(<4 x i64> %a0, <4 x i64> %a1) { ; X86-LABEL: avx512_funnel_shift_q_256: ; X86: # %bb.0: -; X86-NEXT: vpshldvq {{\.LCPI.*}}, %ymm1, %ymm0 +; X86-NEXT: vpshldvq {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0 ; X86-NEXT: retl ; ; X64-LABEL: avx512_funnel_shift_q_256: @@ -59,7 +59,7 @@ define <4 x i32> @avx512_funnel_shift_d_128(<4 x i32> %a0, <4 x i32> %a1) { ; X86-LABEL: avx512_funnel_shift_d_128: ; X86: # %bb.0: -; X86-NEXT: vpshldvd {{\.LCPI.*}}, %xmm1, %xmm0 +; X86-NEXT: vpshldvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm0 ; X86-NEXT: retl ; ; X64-LABEL: avx512_funnel_shift_d_128: @@ -75,7 +75,7 @@ define <8 x i32> @avx512_funnel_shift_d_256(<8 x i32> %a0, <8 x i32> %a1) { ; X86-LABEL: avx512_funnel_shift_d_256: ; X86: # %bb.0: -; X86-NEXT: vpshldvd {{\.LCPI.*}}, %ymm1, %ymm0 +; X86-NEXT: vpshldvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0 ; X86-NEXT: retl ; ; X64-LABEL: 
avx512_funnel_shift_d_256: @@ -113,7 +113,7 @@ define <8 x i16> @avx512_funnel_shift_w_128(<8 x i16> %a0, <8 x i16> %a1) { ; X86-LABEL: avx512_funnel_shift_w_128: ; X86: # %bb.0: -; X86-NEXT: vpshldvw {{\.LCPI.*}}, %xmm1, %xmm0 +; X86-NEXT: vpshldvw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm0 ; X86-NEXT: retl ; ; X64-LABEL: avx512_funnel_shift_w_128: @@ -129,7 +129,7 @@ define <16 x i16> @avx512_funnel_shift_w_256(<16 x i16> %a0, <16 x i16> %a1) { ; X86-LABEL: avx512_funnel_shift_w_256: ; X86: # %bb.0: -; X86-NEXT: vpshldvw {{\.LCPI.*}}, %ymm1, %ymm0 +; X86-NEXT: vpshldvw {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0 ; X86-NEXT: retl ; ; X64-LABEL: avx512_funnel_shift_w_256: diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll --- a/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll +++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll @@ -1905,7 +1905,7 @@ ; X86: # %bb.0: # %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ; X86-NEXT: kmovw %eax, %k1 -; X86-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm0 {%k1} +; X86-NEXT: vpbroadcastd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 {%k1} ; X86-NEXT: retl ; ; X64-LABEL: test_mm_mask_set1_epi32: @@ -1927,7 +1927,7 @@ ; X86: # %bb.0: # %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ; X86-NEXT: kmovw %eax, %k1 -; X86-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm0 {%k1} {z} +; X86-NEXT: vpbroadcastd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 {%k1} {z} ; X86-NEXT: retl ; ; X64-LABEL: test_mm_maskz_set1_epi32: @@ -1948,7 +1948,7 @@ ; X86: # %bb.0: # %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ; X86-NEXT: kmovw %eax, %k1 -; X86-NEXT: vpbroadcastd {{\.LCPI.*}}, %ymm0 {%k1} +; X86-NEXT: vpbroadcastd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 {%k1} ; X86-NEXT: retl ; ; X64-LABEL: test_mm256_mask_set1_epi32: @@ -1969,7 +1969,7 @@ ; X86: # %bb.0: # %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al ; X86-NEXT: kmovw %eax, %k1 -; X86-NEXT: vpbroadcastd {{\.LCPI.*}}, %ymm0 {%k1} {z} +; X86-NEXT: vpbroadcastd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 {%k1} {z} ; X86-NEXT: retl ; ; X64-LABEL: test_mm256_maskz_set1_epi32: diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll --- a/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll +++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll @@ -7321,20 +7321,20 @@ define <8 x i32>@test_int_x86_avx512_mask_psrav8_si_const() { ; X86-LABEL: test_int_x86_avx512_mask_psrav8_si_const: ; X86: # %bb.0: -; X86-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51] +; X86-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51] ; X86-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] -; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 -; X86-NEXT: vpsravd {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A] -; X86-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4 +; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 +; X86-NEXT: vpsravd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A] +; X86-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_mask_psrav8_si_const: ; X64: # %bb.0: ; X64-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = 
[2,9,4294967284,23,4294967270,37,4294967256,51] ; X64-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A] -; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A] -; X64-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-NEXT: retq # encoding: [0xc3] %res = call <8 x i32> @llvm.x86.avx512.mask.psrav8.si(<8 x i32> , <8 x i32> , <8 x i32> zeroinitializer, i8 -1) ret <8 x i32> %res @@ -8632,20 +8632,20 @@ define <2 x i64>@test_int_x86_avx512_mask_psrav_q_128_const(i8 %x3) { ; X86-LABEL: test_int_x86_avx512_mask_psrav_q_128_const: ; X86: # %bb.0: -; X86-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,0,4294967287,4294967295] +; X86-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,0,4294967287,4294967295] ; X86-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] -; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4 -; X86-NEXT: vpsravq {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x46,0x05,A,A,A,A] -; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4 +; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 +; X86-NEXT: vpsravq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x46,0x05,A,A,A,A] +; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4 ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_mask_psrav_q_128_const: ; X64: # %bb.0: ; X64-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [2,18446744073709551607] ; X64-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A] -; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-NEXT: vpsravq {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x46,0x05,A,A,A,A] -; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte +; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte ; X64-NEXT: retq # encoding: [0xc3] %res = call <2 x i64> @llvm.x86.avx512.mask.psrav.q.128(<2 x i64> , <2 x i64> , <2 x i64> zeroinitializer, i8 -1) ret <2 x i64> %res diff --git a/llvm/test/CodeGen/X86/bitreverse.ll b/llvm/test/CodeGen/X86/bitreverse.ll --- a/llvm/test/CodeGen/X86/bitreverse.ll +++ b/llvm/test/CodeGen/X86/bitreverse.ll @@ -79,7 +79,7 @@ ; ; X86XOP-LABEL: test_bitreverse_v2i16: ; X86XOP: # %bb.0: -; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0 +; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0 ; X86XOP-NEXT: retl %b = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> %a) ret <2 x i16> %b @@ -155,7 +155,7 @@ ; X86XOP-LABEL: test_bitreverse_i64: ; X86XOP: # %bb.0: ; X86XOP-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero -; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0 +; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0 ; X86XOP-NEXT: vmovd %xmm0, %eax ; X86XOP-NEXT: vpextrd $1, %xmm0, %edx ; X86XOP-NEXT: retl @@ -213,7 +213,7 @@ ; X86XOP-LABEL: test_bitreverse_i32: ; X86XOP: # %bb.0: ; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero -; 
X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0 +; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0 ; X86XOP-NEXT: vmovd %xmm0, %eax ; X86XOP-NEXT: retl %b = call i32 @llvm.bitreverse.i32(i32 %a) @@ -272,7 +272,7 @@ ; X86XOP-LABEL: test_bitreverse_i24: ; X86XOP: # %bb.0: ; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero -; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0 +; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0 ; X86XOP-NEXT: vmovd %xmm0, %eax ; X86XOP-NEXT: shrl $8, %eax ; X86XOP-NEXT: retl @@ -332,7 +332,7 @@ ; X86XOP-LABEL: test_bitreverse_i16: ; X86XOP: # %bb.0: ; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero -; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0 +; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0 ; X86XOP-NEXT: vmovd %xmm0, %eax ; X86XOP-NEXT: # kill: def $ax killed $ax killed $eax ; X86XOP-NEXT: retl @@ -383,7 +383,7 @@ ; X86XOP-LABEL: test_bitreverse_i8: ; X86XOP: # %bb.0: ; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero -; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0 +; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0 ; X86XOP-NEXT: vmovd %xmm0, %eax ; X86XOP-NEXT: # kill: def $al killed $al killed $eax ; X86XOP-NEXT: retl @@ -436,7 +436,7 @@ ; X86XOP-LABEL: test_bitreverse_i4: ; X86XOP: # %bb.0: ; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero -; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0 +; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0 ; X86XOP-NEXT: vmovd %xmm0, %eax ; X86XOP-NEXT: shrb $4, %al ; X86XOP-NEXT: # kill: def $al killed $al killed $eax diff --git a/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll b/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll --- a/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll +++ b/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll @@ -128,7 +128,7 @@ ; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 +; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; AVX-NEXT: retl ; ; ALL32-LABEL: f32xi8_i16: @@ -168,7 +168,7 @@ ; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 +; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; AVX-NEXT: retl ; ; ALL32-LABEL: f32xi8_i32: @@ -209,7 +209,7 @@ ; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 +; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; AVX-NEXT: retl ; ; ALL32-LABEL: f32xi8_i64: @@ -250,7 +250,7 @@ ; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 +; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; AVX-NEXT: retl ; ; ALL32-LABEL: f32xi8_i128: @@ -716,7 +716,7 @@ ; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 +; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; AVX-NEXT: retl ; ; ALL32-LABEL: f16xi16_i32: @@ -757,7 +757,7 @@ ; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; 
AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 +; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; AVX-NEXT: retl ; ; ALL32-LABEL: f16xi16_i64: @@ -798,7 +798,7 @@ ; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 +; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; AVX-NEXT: retl ; ; ALL32-LABEL: f16xi16_i128: @@ -1161,7 +1161,7 @@ ; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddd %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 +; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; AVX-NEXT: retl ; ; ALL32-LABEL: f8xi32_i64: @@ -1202,7 +1202,7 @@ ; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddd %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 +; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; AVX-NEXT: retl ; ; ALL32-LABEL: f8xi32_i128: @@ -1386,7 +1386,7 @@ ; AVX-NEXT: vpaddq %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vpaddq %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 +; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; AVX-NEXT: retl ; ; ALL32-LABEL: f4xi64_i128: diff --git a/llvm/test/CodeGen/X86/cmov-fp.ll b/llvm/test/CodeGen/X86/cmov-fp.ll --- a/llvm/test/CodeGen/X86/cmov-fp.ll +++ b/llvm/test/CodeGen/X86/cmov-fp.ll @@ -32,7 +32,7 @@ ; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp) ; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax -; NOSSE2-NEXT: flds {{\.LCPI.*}} +; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; NOSSE2-NEXT: fxch %st(1) ; NOSSE2-NEXT: fcmovnbe %st(1), %st ; NOSSE2-NEXT: fstp %st(1) @@ -43,7 +43,7 @@ ; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp) ; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax -; NOSSE1-NEXT: flds {{\.LCPI.*}} +; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fcmovnbe %st(1), %st ; NOSSE1-NEXT: fstp %st(1) @@ -54,7 +54,7 @@ ; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp) ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax -; NOCMOV-NEXT: flds {{\.LCPI.*}} +; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; NOCMOV-NEXT: ja .LBB0_2 ; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: fstp %st(0) @@ -95,7 +95,7 @@ ; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp) ; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax -; NOSSE2-NEXT: flds {{\.LCPI.*}} +; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; NOSSE2-NEXT: fxch %st(1) ; NOSSE2-NEXT: fcmovnb %st(1), %st ; NOSSE2-NEXT: fstp %st(1) @@ -106,7 +106,7 @@ ; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp) ; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax -; NOSSE1-NEXT: flds {{\.LCPI.*}} +; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; NOSSE1-NEXT: fxch %st(1) ; NOSSE1-NEXT: fcmovnb %st(1), %st ; NOSSE1-NEXT: fstp %st(1) @@ -117,7 +117,7 @@ ; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp) ; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax -; NOCMOV-NEXT: flds {{\.LCPI.*}} +; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; NOCMOV-NEXT: jae .LBB1_2 ; NOCMOV-NEXT: # %bb.1: ; NOCMOV-NEXT: fstp %st(0) @@ -158,7 +158,7 @@ ; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp) ; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax -; NOSSE2-NEXT: flds {{\.LCPI.*}} +; NOSSE2-NEXT: flds 
{{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovb %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -169,7 +169,7 @@
; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovb %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -180,7 +180,7 @@
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jb .LBB2_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -221,7 +221,7 @@
; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovbe %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -232,7 +232,7 @@
; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovbe %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -243,7 +243,7 @@
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jbe .LBB3_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -286,7 +286,7 @@
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setg %al
; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -299,7 +299,7 @@
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setg %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -310,7 +310,7 @@
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jg .LBB4_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -353,7 +353,7 @@
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setge %al
; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -366,7 +366,7 @@
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setge %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -377,7 +377,7 @@
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jge .LBB5_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -420,7 +420,7 @@
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setl %al
; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -433,7 +433,7 @@
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setl %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -444,7 +444,7 @@
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jl .LBB6_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -487,7 +487,7 @@
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setle %al
; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -500,7 +500,7 @@
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setle %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -511,7 +511,7 @@
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jle .LBB7_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -565,7 +565,7 @@
; NOSSE1-NEXT: flds {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovnbe %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -576,7 +576,7 @@
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: ja .LBB8_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -630,7 +630,7 @@
; NOSSE1-NEXT: flds {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovnb %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -641,7 +641,7 @@
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jae .LBB9_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -695,7 +695,7 @@
; NOSSE1-NEXT: flds {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovb %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -706,7 +706,7 @@
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jb .LBB10_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -760,7 +760,7 @@
; NOSSE1-NEXT: flds {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovbe %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -771,7 +771,7 @@
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jbe .LBB11_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -827,7 +827,7 @@
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setg %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -838,7 +838,7 @@
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jg .LBB12_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -894,7 +894,7 @@
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setge %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -905,7 +905,7 @@
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jge .LBB13_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -961,7 +961,7 @@
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setl %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -972,7 +972,7 @@
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jl .LBB14_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -1028,7 +1028,7 @@
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setle %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -1039,7 +1039,7 @@
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jle .LBB15_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -1058,7 +1058,7 @@
; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp)
-; SSE-NEXT: flds {{\.LCPI.*}}
+; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fcmovnbe %st(1), %st
@@ -1070,7 +1070,7 @@
; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovnbe %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -1081,7 +1081,7 @@
; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovnbe %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -1092,7 +1092,7 @@
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: ja .LBB16_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -1111,7 +1111,7 @@
; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp)
-; SSE-NEXT: flds {{\.LCPI.*}}
+; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fcmovnb %st(1), %st
@@ -1123,7 +1123,7 @@
; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovnb %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -1134,7 +1134,7 @@
; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovnb %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -1145,7 +1145,7 @@
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jae .LBB17_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -1164,7 +1164,7 @@
; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp)
-; SSE-NEXT: flds {{\.LCPI.*}}
+; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fcmovb %st(1), %st
@@ -1176,7 +1176,7 @@
; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovb %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -1187,7 +1187,7 @@
; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovb %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -1198,7 +1198,7 @@
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jb .LBB18_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -1217,7 +1217,7 @@
; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp)
-; SSE-NEXT: flds {{\.LCPI.*}}
+; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fcmovbe %st(1), %st
@@ -1229,7 +1229,7 @@
; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovbe %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -1240,7 +1240,7 @@
; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovbe %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -1251,7 +1251,7 @@
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jbe .LBB19_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -1270,7 +1270,7 @@
; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp)
-; SSE-NEXT: flds {{\.LCPI.*}}
+; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: setg %al
@@ -1286,7 +1286,7 @@
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setg %al
; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -1299,7 +1299,7 @@
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setg %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -1310,7 +1310,7 @@
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jg .LBB20_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -1330,7 +1330,7 @@
; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp)
-; SSE-NEXT: flds {{\.LCPI.*}}
+; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: setge %al
@@ -1346,7 +1346,7 @@
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setge %al
; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -1359,7 +1359,7 @@
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setge %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -1370,7 +1370,7 @@
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jge .LBB21_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -1389,7 +1389,7 @@
; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp)
-; SSE-NEXT: flds {{\.LCPI.*}}
+; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: setl %al
@@ -1405,7 +1405,7 @@
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setl %al
; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -1418,7 +1418,7 @@
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setl %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -1429,7 +1429,7 @@
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jl .LBB22_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -1448,7 +1448,7 @@
; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp)
-; SSE-NEXT: flds {{\.LCPI.*}}
+; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: setle %al
@@ -1464,7 +1464,7 @@
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setle %al
; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -1477,7 +1477,7 @@
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setle %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -1488,7 +1488,7 @@
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jle .LBB23_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
diff --git a/llvm/test/CodeGen/X86/cmp.ll b/llvm/test/CodeGen/X86/cmp.ll
--- a/llvm/test/CodeGen/X86/cmp.ll
+++ b/llvm/test/CodeGen/X86/cmp.ll
@@ -108,12 +108,12 @@
; CHECK-LABEL: test5:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ucomisd {{.*}}(%rip), %xmm0 # encoding: [0x66,0x0f,0x2e,0x05,A,A,A,A]
-; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; CHECK-NEXT: ja .LBB5_3 # encoding: [0x77,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB5_3-1, kind: FK_PCRel_1
; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: ucomisd {{.*}}(%rip), %xmm0 # encoding: [0x66,0x0f,0x2e,0x05,A,A,A,A]
-; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; CHECK-NEXT: jb .LBB5_3 # encoding: [0x72,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB5_3-1, kind: FK_PCRel_1
; CHECK-NEXT: # %bb.2: # %bb12
diff --git a/llvm/test/CodeGen/X86/code-model-elf.ll b/llvm/test/CodeGen/X86/code-model-elf.ll
--- a/llvm/test/CodeGen/X86/code-model-elf.ll
+++ b/llvm/test/CodeGen/X86/code-model-elf.ll
@@ -417,13 +417,13 @@
;
; MEDIUM-STATIC-LABEL: load_constant_pool:
; MEDIUM-STATIC: # %bb.0:
-; MEDIUM-STATIC-NEXT: movabsq ${{\.LCPI.*}}, %rax
+; MEDIUM-STATIC-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; MEDIUM-STATIC-NEXT: addss (%rax), %xmm0
; MEDIUM-STATIC-NEXT: retq
;
; LARGE-STATIC-LABEL: load_constant_pool:
; LARGE-STATIC: # %bb.0:
-; LARGE-STATIC-NEXT: movabsq ${{\.LCPI.*}}, %rax
+; LARGE-STATIC-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; LARGE-STATIC-NEXT: addss (%rax), %xmm0
; LARGE-STATIC-NEXT: retq
;
@@ -435,7 +435,7 @@
; MEDIUM-PIC-LABEL: load_constant_pool:
; MEDIUM-PIC: # %bb.0:
; MEDIUM-PIC-NEXT: leaq {{.*}}(%rip), %rax
-; MEDIUM-PIC-NEXT: movabsq ${{\.LCPI.*}}@GOTOFF, %rcx
+; MEDIUM-PIC-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}@GOTOFF, %rcx
; MEDIUM-PIC-NEXT: addss (%rax,%rcx), %xmm0
; MEDIUM-PIC-NEXT: retq
;
@@ -445,7 +445,7 @@
; LARGE-PIC-NEXT: leaq .L11${{.*}}(%rip), %rax
; LARGE-PIC-NEXT: movabsq $_GLOBAL_OFFSET_TABLE_-.L11$pb, %rcx
; LARGE-PIC-NEXT: addq %rax, %rcx
-; LARGE-PIC-NEXT: movabsq ${{\.LCPI.*}}@GOTOFF, %rax
+; LARGE-PIC-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}@GOTOFF, %rax
; LARGE-PIC-NEXT: addss (%rcx,%rax), %xmm0
; LARGE-PIC-NEXT: retq
%a = fadd float %x, 1.0
diff --git a/llvm/test/CodeGen/X86/combine-bextr.ll b/llvm/test/CodeGen/X86/combine-bextr.ll
--- a/llvm/test/CodeGen/X86/combine-bextr.ll
+++ b/llvm/test/CodeGen/X86/combine-bextr.ll
@@ -40,8 +40,8 @@
; X32-NEXT: movl $3855, %eax # imm = 0xF0F
; X32-NEXT: bextrl %eax, {{[0-9]+}}(%esp), %eax
; X32-NEXT: movd %eax, %xmm0
-; X32-NEXT: por {{\.LCPI.*}}, %xmm0
-; X32-NEXT: subsd {{\.LCPI.*}}, %xmm0
+; X32-NEXT: por {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X32-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: cvtsd2ss %xmm0, %xmm0
; X32-NEXT: movss %xmm0, (%esp)
; X32-NEXT: flds (%esp)
diff --git a/llvm/test/CodeGen/X86/combine-bitreverse.ll b/llvm/test/CodeGen/X86/combine-bitreverse.ll
--- a/llvm/test/CodeGen/X86/combine-bitreverse.ll
+++ b/llvm/test/CodeGen/X86/combine-bitreverse.ll
@@ -51,23 +51,23 @@
; X86-NEXT: packuswb %xmm2, %xmm0
; X86-NEXT: movdqa %xmm0, %xmm1
; X86-NEXT: psllw $4, %xmm1
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-NEXT: psrlw $4, %xmm0
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: por %xmm1, %xmm0
; X86-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; X86-NEXT: pand %xmm0, %xmm1
; X86-NEXT: psllw $2, %xmm1
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: psrlw $2, %xmm0
; X86-NEXT: por %xmm1, %xmm0
; X86-NEXT: movdqa {{.*#+}} xmm1 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
; X86-NEXT: pand %xmm0, %xmm1
; X86-NEXT: paddb %xmm1, %xmm1
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: psrlw $1, %xmm0
; X86-NEXT: por %xmm1, %xmm0
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test_demandedbits_bitreverse:
diff --git a/llvm/test/CodeGen/X86/combine-multiplies.ll b/llvm/test/CodeGen/X86/combine-multiplies.ll
--- a/llvm/test/CodeGen/X86/combine-multiplies.ll
+++ b/llvm/test/CodeGen/X86/combine-multiplies.ll
@@ -116,7 +116,7 @@
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [242,242,242,242]
; CHECK-NEXT: paddd %xmm0, %xmm2
-; CHECK-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; CHECK-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: movdqa %xmm2, v2
; CHECK-NEXT: movdqa %xmm0, v3
; CHECK-NEXT: movdqa %xmm1, x
@@ -151,7 +151,7 @@
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [242,726,1452,2420]
; CHECK-NEXT: paddd %xmm0, %xmm2
-; CHECK-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; CHECK-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: movdqa %xmm2, v2
; CHECK-NEXT: movdqa %xmm0, v3
; CHECK-NEXT: movdqa %xmm1, x
diff --git a/llvm/test/CodeGen/X86/extractelement-fp.ll b/llvm/test/CodeGen/X86/extractelement-fp.ll
--- a/llvm/test/CodeGen/X86/extractelement-fp.ll
+++ b/llvm/test/CodeGen/X86/extractelement-fp.ll
@@ -328,7 +328,7 @@
;
; X86-LABEL: extvselectsetcc_crash:
; X86: # %bb.0:
-; X86-NEXT: vcmpeqpd {{\.LCPI.*}}, %xmm0, %xmm1
+; X86-NEXT: vcmpeqpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1
; X86-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; X86-NEXT: vandpd %xmm2, %xmm1, %xmm1
; X86-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -556,7 +556,7 @@
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp
-; X86-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: vmovlps %xmm0, (%esp)
; X86-NEXT: fldl (%esp)
; X86-NEXT: movl %ebp, %esp
@@ -830,8 +830,8 @@
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp
-; X86-NEXT: vandps {{\.LCPI.*}}, %xmm1, %xmm1
-; X86-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
+; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: vorps %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovlps %xmm0, (%esp)
; X86-NEXT: fldl (%esp)
@@ -1111,7 +1111,7 @@
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp
-; X86-NEXT: vandpd {{\.LCPI.*}}, %xmm0, %xmm1
+; X86-NEXT: vandpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1
; X86-NEXT: vmovddup {{.*#+}} xmm2 = [4.9999999999999994E-1,4.9999999999999994E-1]
; X86-NEXT: # xmm2 = mem[0,0]
; X86-NEXT: vorpd %xmm1, %xmm2, %xmm1
diff --git a/llvm/test/CodeGen/X86/fast-isel-fneg.ll b/llvm/test/CodeGen/X86/fast-isel-fneg.ll
--- a/llvm/test/CodeGen/X86/fast-isel-fneg.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-fneg.ll
@@ -18,7 +18,7 @@
; SSE2-NEXT: andl $-8, %esp
; SSE2-NEXT: subl $8, %esp
; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; SSE2-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE2-NEXT: movlps %xmm0, (%esp)
; SSE2-NEXT: fldl (%esp)
; SSE2-NEXT: movl %ebp, %esp
@@ -40,7 +40,7 @@
; SSE2: # %bb.0:
; SSE2-NEXT: pushl %eax
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; SSE2-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE2-NEXT: movss %xmm0, (%esp)
; SSE2-NEXT: flds (%esp)
; SSE2-NEXT: popl %eax
@@ -65,7 +65,7 @@
; SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; SSE2-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE2-NEXT: movsd %xmm0, (%eax)
; SSE2-NEXT: retl
%a = load double, double* %x
diff --git a/llvm/test/CodeGen/X86/fildll.ll b/llvm/test/CodeGen/X86/fildll.ll
--- a/llvm/test/CodeGen/X86/fildll.ll
+++ b/llvm/test/CodeGen/X86/fildll.ll
@@ -36,7 +36,7 @@
; CHECK-NEXT: movl %ecx, (%esp)
; CHECK-NEXT: shrl $31, %edx
; CHECK-NEXT: fildll (%esp)
-; CHECK-NEXT: fadds {{\.LCPI.*}}(,%edx,4)
+; CHECK-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%edx,4)
; CHECK-NEXT: fstpl {{[0-9]+}}(%esp)
; CHECK-NEXT: fldl {{[0-9]+}}(%esp)
; CHECK-NEXT: movl %ebp, %esp
diff --git a/llvm/test/CodeGen/X86/fma-scalar-combine.ll b/llvm/test/CodeGen/X86/fma-scalar-combine.ll
--- a/llvm/test/CodeGen/X86/fma-scalar-combine.ll
+++ b/llvm/test/CodeGen/X86/fma-scalar-combine.ll
@@ -548,9 +548,9 @@
; CHECK-LABEL: fma_const_fmul:
; CHECK: # %bb.0:
; CHECK-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x59,0x0d,A,A,A,A]
-; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; CHECK-NEXT: vfmadd132ss {{.*}}(%rip), %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x99,0x05,A,A,A,A]
-; CHECK-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; CHECK-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; CHECK-NEXT: # xmm0 = (xmm0 * mem) + xmm1
; CHECK-NEXT: retq # encoding: [0xc3]
%mul1 = fmul contract float %x, 10.0
diff --git a/llvm/test/CodeGen/X86/fmf-flags.ll b/llvm/test/CodeGen/X86/fmf-flags.ll
--- a/llvm/test/CodeGen/X86/fmf-flags.ll
+++ b/llvm/test/CodeGen/X86/fmf-flags.ll
@@ -38,7 +38,7 @@
; X86-LABEL: fast_fmuladd_opts:
; X86: # %bb.0:
; X86-NEXT: flds {{[0-9]+}}(%esp)
-; X86-NEXT: fmuls {{\.LCPI.*}}
+; X86-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: retl
%res = call fast float @llvm.fmuladd.f32(float %a, float 2.0, float %a)
ret float %res
@@ -61,9 +61,9 @@
; X86: # %bb.0:
; X86-NEXT: fldl {{[0-9]+}}(%esp)
; X86-NEXT: fld %st(0)
-; X86-NEXT: fmull {{\.LCPI.*}}
+; X86-NEXT: fmull {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fxch %st(1)
-; X86-NEXT: fmull {{\.LCPI.*}}
+; X86-NEXT: fmull {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fxch %st(1)
; X86-NEXT: fstpl mul1
; X86-NEXT: retl
@@ -127,7 +127,7 @@
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: calll __gnu_h2f_ieee
-; X86-NEXT: fmuls {{\.LCPI.*}}
+; X86-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __gnu_f2h_ieee
; X86-NEXT: movzwl %ax, %eax
diff --git a/llvm/test/CodeGen/X86/fp-cvt.ll b/llvm/test/CodeGen/X86/fp-cvt.ll
--- a/llvm/test/CodeGen/X86/fp-cvt.ll
+++ b/llvm/test/CodeGen/X86/fp-cvt.ll
@@ -443,7 +443,7 @@
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $16, %esp
; X86-NEXT: fldt 8(%ebp)
-; X86-NEXT: flds {{\.LCPI.*}}
+; X86-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fucom %st(1)
; X86-NEXT: fnstsw %ax
; X86-NEXT: xorl %edx, %edx
@@ -523,7 +523,7 @@
; X86-NEXT: subl $16, %esp
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: fldt (%eax)
-; X86-NEXT: flds {{\.LCPI.*}}
+; X86-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fucom %st(1)
; X86-NEXT: fnstsw %ax
; X86-NEXT: xorl %edx, %edx
@@ -825,7 +825,7 @@
; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: shrl $31, %ecx
; X86-NEXT: fildll (%esp)
-; X86-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: retl
@@ -837,7 +837,7 @@
; X64-NEXT: testq %rdi, %rdi
; X64-NEXT: sets %al
; X64-NEXT: fildll -{{[0-9]+}}(%rsp)
-; X64-NEXT: fadds {{\.LCPI.*}}(,%rax,4)
+; X64-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%rax,4)
; X64-NEXT: retq
%1 = uitofp i64 %a0 to x86_fp80
ret x86_fp80 %1
@@ -857,7 +857,7 @@
; X86-NEXT: movl %ecx, (%esp)
; X86-NEXT: shrl $31, %eax
; X86-NEXT: fildll (%esp)
-; X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: retl
@@ -870,7 +870,7 @@
; X64-NEXT: testq %rax, %rax
; X64-NEXT: sets %cl
; X64-NEXT: fildll -{{[0-9]+}}(%rsp)
-; X64-NEXT: fadds {{\.LCPI.*}}(,%rcx,4)
+; X64-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%rcx,4)
; X64-NEXT: retq
%1 = load i64, i64 *%a0
%2 = uitofp i64 %1 to x86_fp80
diff --git a/llvm/test/CodeGen/X86/fp-intrinsics.ll b/llvm/test/CodeGen/X86/fp-intrinsics.ll
--- a/llvm/test/CodeGen/X86/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/fp-intrinsics.ll
@@ -18,7 +18,7 @@
; X87-LABEL: f1:
; X87: # %bb.0: # %entry
; X87-NEXT: fld1
-; X87-NEXT: fdivs {{\.LCPI.*}}
+; X87-NEXT: fdivs {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: wait
; X87-NEXT: retl
;
@@ -27,7 +27,7 @@
; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE-NEXT: divsd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: divsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: fldl (%esp)
; X86-SSE-NEXT: wait
@@ -209,7 +209,7 @@
; X86-SSE-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X86-SSE-NEXT: jle .LBB3_2
; X86-SSE-NEXT: # %bb.1: # %if.then
-; X86-SSE-NEXT: addsd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: addsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: .LBB3_2: # %if.end
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: fldl (%esp)
@@ -255,7 +255,7 @@
define double @f5() #0 {
; X87-LABEL: f5:
; X87: # %bb.0: # %entry
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fsqrt
; X87-NEXT: wait
; X87-NEXT: retl
@@ -297,9 +297,9 @@
; X87: # %bb.0: # %entry
; X87-NEXT: subl $28, %esp
; X87-NEXT: .cfi_def_cfa_offset 32
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl {{[0-9]+}}(%esp)
-; X87-NEXT: fldl {{\.LCPI.*}}
+; X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll pow
@@ -355,7 +355,7 @@
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: fldl {{\.LCPI.*}}
+; X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: movl $3, {{[0-9]+}}(%esp)
@@ -411,7 +411,7 @@
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll sin
@@ -462,7 +462,7 @@
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll cos
@@ -513,7 +513,7 @@
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll exp
@@ -564,7 +564,7 @@
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: fldl {{\.LCPI.*}}
+; X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll exp2
@@ -615,7 +615,7 @@
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll log
@@ -666,7 +666,7 @@
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll log10
@@ -717,7 +717,7 @@
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll log2
@@ -768,7 +768,7 @@
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: fldl {{\.LCPI.*}}
+; X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll rint
@@ -816,7 +816,7 @@
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: fldl {{\.LCPI.*}}
+; X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll nearbyint
@@ -863,7 +863,7 @@
; X87: # %bb.0: # %entry
; X87-NEXT: subl $28, %esp
; X87-NEXT: .cfi_def_cfa_offset 32
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl {{[0-9]+}}(%esp)
; X87-NEXT: wait
; X87-NEXT: movl $1072693248, {{[0-9]+}}(%esp) # imm = 0x3FF00000
@@ -1356,7 +1356,7 @@
; X87-NEXT: subl $20, %esp
; X87-NEXT: .cfi_def_cfa_offset 24
; X87-NEXT: fldl {{[0-9]+}}(%esp)
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: wait
; X87-NEXT: xorl %edx, %edx
; X87-NEXT: fcomi %st(1), %st
@@ -1541,7 +1541,7 @@
; X87: # %bb.0: # %entry
; X87-NEXT: pushl %eax
; X87-NEXT: .cfi_def_cfa_offset 8
-; X87-NEXT: fldl {{\.LCPI.*}}
+; X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstps (%esp)
; X87-NEXT: flds (%esp)
; X87-NEXT: wait
@@ -2437,8 +2437,8 @@
; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: orpd {{\.LCPI.*}}, %xmm0
-; X86-SSE-NEXT: subsd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: fldl (%esp)
; X86-SSE-NEXT: wait
@@ -2480,7 +2480,7 @@
; X87-NEXT: movl %eax, (%esp)
; X87-NEXT: shrl $31, %ecx
; X87-NEXT: fildll (%esp)
-; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X87-NEXT: fstpl {{[0-9]+}}(%esp)
; X87-NEXT: fldl {{[0-9]+}}(%esp)
; X87-NEXT: wait
@@ -2497,7 +2497,7 @@
; X86-SSE-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; X86-SSE-NEXT: shrl $31, %eax
; X86-SSE-NEXT: fildll {{[0-9]+}}(%esp)
-; X86-SSE-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; X86-SSE-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-SSE-NEXT: fstpl {{[0-9]+}}(%esp)
; X86-SSE-NEXT: wait
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -2658,8 +2658,8 @@
; X86-SSE-NEXT: pushl %eax
; X86-SSE-NEXT: .cfi_def_cfa_offset 8
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: orpd {{\.LCPI.*}}, %xmm0
-; X86-SSE-NEXT: subsd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvtsd2ss %xmm0, %xmm0
; X86-SSE-NEXT: movss %xmm0, (%esp)
; X86-SSE-NEXT: flds (%esp)
@@ -2702,7 +2702,7 @@
; X87-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X87-NEXT: shrl $31, %ecx
; X87-NEXT: fildll {{[0-9]+}}(%esp)
-; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X87-NEXT: fstps {{[0-9]+}}(%esp)
; X87-NEXT: flds {{[0-9]+}}(%esp)
; X87-NEXT: wait
@@ -2719,7 +2719,7 @@
; X86-SSE-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; X86-SSE-NEXT: shrl $31, %eax
; X86-SSE-NEXT: fildll {{[0-9]+}}(%esp)
-; X86-SSE-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; X86-SSE-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-SSE-NEXT: fstps {{[0-9]+}}(%esp)
; X86-SSE-NEXT: wait
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
diff --git a/llvm/test/CodeGen/X86/fp-stack-set-st1.ll b/llvm/test/CodeGen/X86/fp-stack-set-st1.ll
--- a/llvm/test/CodeGen/X86/fp-stack-set-st1.ll
+++ b/llvm/test/CodeGen/X86/fp-stack-set-st1.ll
@@ -4,8 +4,8 @@
define i32 @main() nounwind {
; CHECK-LABEL: main:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fldl {{\.LCPI.*}}
-; CHECK-NEXT: fldl {{\.LCPI.*}}
+; CHECK-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
+; CHECK-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; CHECK-NEXT: fxch %st(1)
; CHECK-NEXT: #APP
; CHECK-NEXT: fmul %st(1), %st
diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint.ll
--- a/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint.ll
@@ -679,7 +679,7 @@
; X87-NEXT: andl $-8, %esp
; X87-NEXT: subl $16, %esp
; X87-NEXT: flds 8(%ebp)
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fcom %st(1)
; X87-NEXT: wait
; X87-NEXT: fnstsw %ax
@@ -1319,7 +1319,7 @@
; X87-NEXT: andl $-8, %esp
; X87-NEXT: subl $16, %esp
; X87-NEXT: fldl 8(%ebp)
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fcom %st(1)
; X87-NEXT: wait
; X87-NEXT: fnstsw %ax
diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar-inttofp.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-inttofp.ll
--- a/llvm/test/CodeGen/X86/fp-strict-scalar-inttofp.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar-inttofp.ll
@@ -488,8 +488,8 @@
; SSE-X86-NEXT: pushl %eax
; SSE-X86-NEXT: .cfi_def_cfa_offset 8
; SSE-X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-X86-NEXT: orpd {{\.LCPI.*}}, %xmm0
-; SSE-X86-NEXT: subsd {{\.LCPI.*}}, %xmm0
+; SSE-X86-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; SSE-X86-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-X86-NEXT: cvtsd2ss %xmm0, %xmm0
; SSE-X86-NEXT: movss %xmm0, (%esp)
; SSE-X86-NEXT: flds (%esp)
@@ -509,8 +509,8 @@
; AVX1-X86-NEXT: pushl %eax
; AVX1-X86-NEXT: .cfi_def_cfa_offset 8
; AVX1-X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX1-X86-NEXT: vorpd {{\.LCPI.*}}, %xmm0, %xmm0
-; AVX1-X86-NEXT: vsubsd {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX1-X86-NEXT: vorpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; AVX1-X86-NEXT: vsubsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX1-X86-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
; AVX1-X86-NEXT: vmovss %xmm0, (%esp)
; AVX1-X86-NEXT: flds (%esp)
@@ -581,7 +581,7 @@
; SSE-X86-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; SSE-X86-NEXT: shrl $31, %eax
; SSE-X86-NEXT: fildll {{[0-9]+}}(%esp)
-; SSE-X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; SSE-X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE-X86-NEXT: fstps {{[0-9]+}}(%esp)
; SSE-X86-NEXT: wait
; SSE-X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -623,7 +623,7 @@
; AVX-X86-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX-X86-NEXT: shrl $31, %eax
; AVX-X86-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-X86-NEXT: fstps {{[0-9]+}}(%esp)
; AVX-X86-NEXT: wait
; AVX-X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -671,7 +671,7 @@
; X87-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X87-NEXT: shrl $31, %ecx
; X87-NEXT: fildll {{[0-9]+}}(%esp)
-; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X87-NEXT: fstps {{[0-9]+}}(%esp)
; X87-NEXT: flds {{[0-9]+}}(%esp)
; X87-NEXT: wait
@@ -1164,8 +1164,8 @@
; SSE-X86-NEXT: andl $-8, %esp
; SSE-X86-NEXT: subl $8, %esp
; SSE-X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-X86-NEXT: orpd {{\.LCPI.*}}, %xmm0
-; SSE-X86-NEXT: subsd {{\.LCPI.*}}, %xmm0
+; SSE-X86-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; SSE-X86-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-X86-NEXT: movsd %xmm0, (%esp)
; SSE-X86-NEXT: fldl (%esp)
; SSE-X86-NEXT: wait
@@ -1190,8 +1190,8 @@
; AVX1-X86-NEXT: andl $-8, %esp
; AVX1-X86-NEXT: subl $8, %esp
; AVX1-X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX1-X86-NEXT: vorpd {{\.LCPI.*}}, %xmm0, %xmm0
-; AVX1-X86-NEXT: vsubsd {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX1-X86-NEXT: vorpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; AVX1-X86-NEXT: vsubsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX1-X86-NEXT: vmovsd %xmm0, (%esp)
; AVX1-X86-NEXT: fldl (%esp)
; AVX1-X86-NEXT: wait
@@ -1268,7 +1268,7 @@
; SSE-X86-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; SSE-X86-NEXT: shrl $31, %eax
; SSE-X86-NEXT: fildll {{[0-9]+}}(%esp)
-; SSE-X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; SSE-X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE-X86-NEXT: fstpl {{[0-9]+}}(%esp)
; SSE-X86-NEXT: wait
; SSE-X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -1310,7 +1310,7 @@
; AVX-X86-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX-X86-NEXT: shrl $31, %eax
; AVX-X86-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-X86-NEXT: fstpl {{[0-9]+}}(%esp)
; AVX-X86-NEXT: wait
; AVX-X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -1358,7 +1358,7 @@
; X87-NEXT: movl %eax, (%esp)
; X87-NEXT: shrl $31, %ecx
; X87-NEXT: fildll (%esp)
-; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X87-NEXT: fstpl {{[0-9]+}}(%esp)
; X87-NEXT: fldl {{[0-9]+}}(%esp)
; X87-NEXT: wait
diff --git a/llvm/test/CodeGen/X86/fp128-cast.ll b/llvm/test/CodeGen/X86/fp128-cast.ll
--- a/llvm/test/CodeGen/X86/fp128-cast.ll
+++ b/llvm/test/CodeGen/X86/fp128-cast.ll
@@ -1287,8 +1287,8 @@
; X32-NEXT: addl $16, %esp
; X32-NEXT: fstpl {{[0-9]+}}(%esp)
; X32-NEXT: testb $-128, {{[0-9]+}}(%esp)
-; X32-NEXT: flds {{\.LCPI.*}}
-; X32-NEXT: flds {{\.LCPI.*}}
+; X32-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
+; X32-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X32-NEXT: jne .LBB26_3
; X32-NEXT: # %bb.2: # %if.then
; X32-NEXT: fstp %st(1)
diff --git a/llvm/test/CodeGen/X86/fp128-i128.ll b/llvm/test/CodeGen/X86/fp128-i128.ll
--- a/llvm/test/CodeGen/X86/fp128-i128.ll
+++ b/llvm/test/CodeGen/X86/fp128-i128.ll
@@ -144,7 +144,7 @@
; SSE-NEXT: testl %eax, %eax
; SSE-NEXT: sets %cl
; SSE-NEXT: shlq $4, %rcx
-; SSE-NEXT: movaps {{\.LCPI.*}}(%rcx), %xmm0
+; SSE-NEXT: movaps {{\.LCPI[0-9]+_[0-9]+}}(%rcx), %xmm0
; SSE-NEXT: addq $40, %rsp
; SSE-NEXT: retq
;
@@ -164,7 +164,7 @@
; AVX-NEXT: testl %eax, %eax
; AVX-NEXT: sets %cl
; AVX-NEXT: shlq $4, %rcx
-; AVX-NEXT: vmovaps {{\.LCPI.*}}(%rcx), %xmm0
+; AVX-NEXT: vmovaps {{\.LCPI[0-9]+_[0-9]+}}(%rcx), %xmm0
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: retq
entry:
diff --git a/llvm/test/CodeGen/X86/fp80-strict-scalar.ll b/llvm/test/CodeGen/X86/fp80-strict-scalar.ll
--- a/llvm/test/CodeGen/X86/fp80-strict-scalar.ll
+++ b/llvm/test/CodeGen/X86/fp80-strict-scalar.ll
@@ -588,7 +588,7 @@
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $16, %esp
; X86-NEXT: fldt 8(%ebp)
-; X86-NEXT: flds {{\.LCPI.*}}
+; X86-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fcom %st(1)
; X86-NEXT: wait
; X86-NEXT: fnstsw %ax
@@ -905,7 +905,7 @@
; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: shrl $31, %ecx
; X86-NEXT: fildll (%esp)
-; X86-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X86-NEXT: wait
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
@@ -919,7 +919,7 @@
; X64-NEXT: testq %rdi, %rdi
; X64-NEXT: sets %al
; X64-NEXT: fildll -{{[0-9]+}}(%rsp)
-; X64-NEXT: fadds {{\.LCPI.*}}(,%rax,4)
+; X64-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%rax,4)
; X64-NEXT: wait
; X64-NEXT: retq
%result = call x86_fp80 @llvm.experimental.constrained.uitofp.f80.i64(i64 %x,
diff --git a/llvm/test/CodeGen/X86/fptosi-sat-scalar.ll b/llvm/test/CodeGen/X86/fptosi-sat-scalar.ll
--- a/llvm/test/CodeGen/X86/fptosi-sat-scalar.ll
+++ b/llvm/test/CodeGen/X86/fptosi-sat-scalar.ll
@@ -105,7 +105,7 @@
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -117,7 +117,7 @@
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movb {{[0-9]+}}(%esp), %dl
; X86-X87-NEXT: .LBB1_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -176,7 +176,7 @@
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -188,7 +188,7 @@
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB2_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -248,7 +248,7 @@
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -260,7 +260,7 @@
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB3_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -320,7 +320,7 @@
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -332,7 +332,7 @@
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB4_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -362,8 +362,8 @@
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: xorl %eax, %eax
; X86-SSE-NEXT: ucomiss %xmm0, %xmm0
-; X86-SSE-NEXT: maxss {{\.LCPI.*}}, %xmm0
-; X86-SSE-NEXT: minss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: maxss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: minss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttss2si %xmm0, %ecx
; X86-SSE-NEXT: cmovnpl %ecx, %eax
; X86-SSE-NEXT: retl
@@ -393,7 +393,7 @@
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -405,7 +405,7 @@
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB5_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -434,7 +434,7 @@
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: cvttss2si %xmm0, %eax
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %ecx # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %eax, %ecx
; X86-SSE-NEXT: xorl %eax, %eax
@@ -471,7 +471,7 @@
; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -489,7 +489,7 @@
; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB6_4:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -537,12 +537,12 @@
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-131072, %eax # imm = 0xFFFE0000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $131071, %edx # imm = 0x1FFFF
; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax
@@ -586,7 +586,7 @@
; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -604,7 +604,7 @@
; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB7_4:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -652,12 +652,12 @@
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax
@@ -695,7 +695,7 @@
; X86-X87-NEXT: fsts {{[0-9]+}}(%esp)
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fsts {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-X87-NEXT: fucompp
@@ -723,7 +723,7 @@
; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB8_6:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -789,7 +789,7 @@
; X86-SSE-NEXT: subl $4, %esp
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: xorl %ebp, %ebp
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-8, %ebx
; X86-SSE-NEXT: movl $0, %ecx
; X86-SSE-NEXT: movl $0, %edx
@@ -801,7 +801,7 @@
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB8_2:
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %eax
; X86-SSE-NEXT: cmoval %eax, %edi
; X86-SSE-NEXT: cmoval %eax, %edx
@@ -864,7 +864,7 @@
; X86-X87-NEXT: fsts {{[0-9]+}}(%esp)
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fsts {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-X87-NEXT: fucompp
@@ -888,7 +888,7 @@
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB9_6:
; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -964,7 +964,7 @@
; X86-SSE-NEXT: subl $4, %esp
; X86-SSE-NEXT: xorl %ecx, %ecx
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: cmovbl %ecx, %eax
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -973,7 +973,7 @@
; X86-SSE-NEXT: cmovbl %ecx, %edi
; X86-SSE-NEXT: movl $-2147483648, %ebx # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %ebx
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %ebp # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %ebx, %ebp
; X86-SSE-NEXT: movl $-1, %ebx
@@ -1125,7 +1125,7 @@
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1137,7 +1137,7 @@
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movb {{[0-9]+}}(%esp), %dl
; X86-X87-NEXT: .LBB11_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1196,7 +1196,7 @@
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1208,7 +1208,7 @@
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB12_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1268,7 +1268,7 @@
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1280,7 +1280,7 @@
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB13_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1340,7 +1340,7 @@
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1352,7 +1352,7 @@
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB14_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1382,8 +1382,8 @@
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT: xorl %eax, %eax
; X86-SSE-NEXT: ucomisd %xmm0, %xmm0
-; X86-SSE-NEXT: maxsd {{\.LCPI.*}}, %xmm0
-; X86-SSE-NEXT: minsd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: maxsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: minsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttsd2si %xmm0, %ecx
; X86-SSE-NEXT: cmovnpl %ecx, %eax
; X86-SSE-NEXT: retl
@@ -1413,7 +1413,7 @@
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1425,7 +1425,7 @@
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB15_2:
-; X86-X87-NEXT: fldl {{\.LCPI.*}}
+; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1455,8 +1455,8 @@
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT: xorl %eax, %eax
; X86-SSE-NEXT: ucomisd %xmm0, %xmm0
-; X86-SSE-NEXT: maxsd {{\.LCPI.*}}, %xmm0
-; X86-SSE-NEXT: minsd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: maxsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: minsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttsd2si %xmm0, %ecx
; X86-SSE-NEXT: cmovnpl %ecx, %eax
; X86-SSE-NEXT: retl
@@ -1489,7 +1489,7 @@
; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1507,7 +1507,7 @@
; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB16_4:
-; X86-X87-NEXT: fldl {{\.LCPI.*}}
+; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1555,12 +1555,12 @@
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-131072, %eax # imm = 0xFFFE0000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $131071, %edx # imm = 0x1FFFF
; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax
@@ -1600,7 +1600,7 @@
; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1618,7 +1618,7 @@
; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB17_4:
-; X86-X87-NEXT: fldl {{\.LCPI.*}}
+; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1666,12 +1666,12 @@
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax
@@ -1709,7 +1709,7 @@
; X86-X87-NEXT: fstl {{[0-9]+}}(%esp)
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fstl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Spill
; X86-X87-NEXT: fucompp
@@ -1737,7 +1737,7 @@
; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB18_6:
-; X86-X87-NEXT: fldl {{\.LCPI.*}}
+; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1803,7 +1803,7 @@
; X86-SSE-NEXT: subl $4, %esp
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT: xorl %ebp, %ebp
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-8, %ebx
; X86-SSE-NEXT: movl $0, %ecx
; X86-SSE-NEXT: movl $0, %edx
@@ -1815,7 +1815,7 @@
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB18_2:
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %eax
; X86-SSE-NEXT: cmoval %eax, %edi
; X86-SSE-NEXT: cmoval %eax, %edx
@@ -1878,7 +1878,7 @@
; X86-X87-NEXT: fstl {{[0-9]+}}(%esp)
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fstl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Spill
; X86-X87-NEXT: fucompp
@@ -1902,7 +1902,7 @@
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB19_6:
; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-X87-NEXT: fldl {{\.LCPI.*}}
+; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1978,7 +1978,7 @@
; X86-SSE-NEXT: subl $4, %esp
; X86-SSE-NEXT: xorl %ecx, %ecx
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: cmovbl %ecx, %eax
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -1987,7 +1987,7 @@
; X86-SSE-NEXT: cmovbl %ecx, %edi
; X86-SSE-NEXT: movl $-2147483648, %ebx # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %ebx
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %ebp # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %ebx, %ebp
; X86-SSE-NEXT: movl $-1, %ebx
@@ -2153,7 +2153,7 @@
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2165,7 +2165,7 @@
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movb {{[0-9]+}}(%esp), %dl
; X86-X87-NEXT: .LBB21_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2236,7 +2236,7 @@
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2248,7 +2248,7 @@
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB22_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2320,7 +2320,7 @@
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2332,7 +2332,7 @@
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB23_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2404,7 +2404,7 @@
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2416,7 +2416,7 @@
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB24_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2451,8 +2451,8 @@
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: xorl %eax, %eax
; X86-SSE-NEXT: ucomiss %xmm0, %xmm0
-; X86-SSE-NEXT: maxss {{\.LCPI.*}}, %xmm0
-; X86-SSE-NEXT: minss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: maxss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: minss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttss2si %xmm0, %ecx
; X86-SSE-NEXT: cmovnpl %ecx, %eax
; X86-SSE-NEXT: addl $12, %esp
@@ -2489,7 +2489,7 @@
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2501,7 +2501,7 @@
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB25_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2535,7 +2535,7 @@
; X86-SSE-NEXT: fstps {{[0-9]+}}(%esp)
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: cvttss2si %xmm0, %eax
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %ecx # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %eax, %ecx
; X86-SSE-NEXT: xorl %eax, %eax
@@ -2579,7 +2579,7 @@
; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2597,7 +2597,7 @@
; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB26_4:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2649,12 +2649,12 @@
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-131072, %eax # imm = 0xFFFE0000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $131071, %edx # imm = 0x1FFFF
; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax
@@ -2704,7 +2704,7 @@
; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2722,7 +2722,7 @@
; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB27_4:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2774,12 +2774,12 @@
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax
@@ -2823,7 +2823,7 @@
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp)
; X86-X87-NEXT: fsts {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fsts {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-X87-NEXT: fucompp
@@ -2851,7 +2851,7 @@
; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB28_6:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2923,7 +2923,7 @@
; X86-SSE-NEXT: movss 
{{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 4-byte Reload ; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero ; X86-SSE-NEXT: xorl %ebp, %ebp -; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movl $-8, %ebx ; X86-SSE-NEXT: movl $0, %ecx ; X86-SSE-NEXT: movl $0, %edx @@ -2935,7 +2935,7 @@ ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: .LBB28_2: -; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movl $-1, %eax ; X86-SSE-NEXT: cmoval %eax, %edi ; X86-SSE-NEXT: cmoval %eax, %edx @@ -3002,7 +3002,7 @@ ; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax ; X86-X87-NEXT: movl %eax, (%esp) ; X86-X87-NEXT: fsts {{[0-9]+}}(%esp) -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fsts {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill ; X86-X87-NEXT: fucompp @@ -3026,7 +3026,7 @@ ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB29_6: ; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fstp %st(1) @@ -3108,7 +3108,7 @@ ; X86-SSE-NEXT: xorl %ecx, %ecx ; X86-SSE-NEXT: movss {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 4-byte Reload ; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero -; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: cmovbl %ecx, %eax ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx @@ -3117,7 +3117,7 @@ ; X86-SSE-NEXT: cmovbl %ecx, %edi ; X86-SSE-NEXT: movl $-2147483648, %ebx # imm = 0x80000000 ; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %ebx -; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movl $2147483647, %ebp # imm = 0x7FFFFFFF ; X86-SSE-NEXT: cmovbel %ebx, %ebp ; X86-SSE-NEXT: movl $-1, %ebx @@ -3311,7 +3311,7 @@ ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fists {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fstp %st(1) @@ -3323,7 +3323,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movb {{[0-9]+}}(%esp), %dl ; X86-X87-NEXT: .LBB31_2: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fstp %st(1) @@ -3359,13 +3359,13 @@ ; X86-SSE-NEXT: fists {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax -; X86-SSE-NEXT: flds {{\.LCPI.*}} +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: movl $128, %ecx ; X86-SSE-NEXT: cmovael %eax, %ecx -; X86-SSE-NEXT: flds {{\.LCPI.*}} +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fstp %st(1) @@ -3422,7 +3422,7 @@ ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fists {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: 
fucom %st(1) ; X86-X87-NEXT: fstp %st(1) @@ -3434,7 +3434,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB32_2: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fstp %st(1) @@ -3470,7 +3470,7 @@ ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fists {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) -; X86-SSE-NEXT: flds {{\.LCPI.*}} +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fstp %st(1) @@ -3479,7 +3479,7 @@ ; X86-SSE-NEXT: # %bb.1: ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: .LBB32_2: -; X86-SSE-NEXT: flds {{\.LCPI.*}} +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fstp %st(1) @@ -3538,7 +3538,7 @@ ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fists {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fstp %st(1) @@ -3550,7 +3550,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB33_2: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fstp %st(1) @@ -3586,7 +3586,7 @@ ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fists {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) -; X86-SSE-NEXT: flds {{\.LCPI.*}} +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fstp %st(1) @@ -3595,7 +3595,7 @@ ; X86-SSE-NEXT: # %bb.1: ; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: .LBB33_2: -; X86-SSE-NEXT: flds {{\.LCPI.*}} +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fstp %st(1) @@ -3654,7 +3654,7 @@ ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fistl {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw (%esp) -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fstp %st(1) @@ -3666,7 +3666,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB34_2: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fstp %st(1) @@ -3702,7 +3702,7 @@ ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fistl {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw (%esp) -; X86-SSE-NEXT: flds {{\.LCPI.*}} +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fstp %st(1) @@ -3711,7 +3711,7 @@ ; X86-SSE-NEXT: # %bb.1: ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: .LBB34_2: -; X86-SSE-NEXT: flds {{\.LCPI.*}} +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fstp %st(1) @@ -3768,7 +3768,7 @@ ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-X87-NEXT: fistl {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw (%esp) -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucom %st(1) 
; X86-X87-NEXT: fstp %st(1) @@ -3780,7 +3780,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB35_2: -; X86-X87-NEXT: fldl {{\.LCPI.*}} +; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fstp %st(1) @@ -3816,7 +3816,7 @@ ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fistl {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw (%esp) -; X86-SSE-NEXT: flds {{\.LCPI.*}} +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fstp %st(1) @@ -3825,7 +3825,7 @@ ; X86-SSE-NEXT: # %bb.1: ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: .LBB35_2: -; X86-SSE-NEXT: fldl {{\.LCPI.*}} +; X86-SSE-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fstp %st(1) @@ -3885,7 +3885,7 @@ ; X86-X87-NEXT: fld %st(0) ; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fstp %st(1) @@ -3903,7 +3903,7 @@ ; X86-X87-NEXT: # %bb.3: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-X87-NEXT: .LBB36_4: -; X86-X87-NEXT: fldl {{\.LCPI.*}} +; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fstp %st(1) @@ -3950,7 +3950,7 @@ ; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: xorl %ecx, %ecx -; X86-SSE-NEXT: flds {{\.LCPI.*}} +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fstp %st(1) @@ -3958,7 +3958,7 @@ ; X86-SSE-NEXT: cmovbl %ecx, %esi ; X86-SSE-NEXT: movl $-131072, %eax # imm = 0xFFFE0000 ; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax -; X86-SSE-NEXT: fldl {{\.LCPI.*}} +; X86-SSE-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fstp %st(1) @@ -4024,7 +4024,7 @@ ; X86-X87-NEXT: fld %st(0) ; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp) -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fstp %st(1) @@ -4042,7 +4042,7 @@ ; X86-X87-NEXT: # %bb.3: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-X87-NEXT: .LBB37_4: -; X86-X87-NEXT: fldt {{\.LCPI.*}} +; X86-X87-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fstp %st(1) @@ -4089,7 +4089,7 @@ ; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp) ; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp) ; X86-SSE-NEXT: xorl %ecx, %ecx -; X86-SSE-NEXT: flds {{\.LCPI.*}} +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fstp %st(1) @@ -4097,7 +4097,7 @@ ; X86-SSE-NEXT: cmovbl %ecx, %esi ; X86-SSE-NEXT: movl $-2147483648, %eax # imm = 0x80000000 ; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax -; X86-SSE-NEXT: fldt {{\.LCPI.*}} +; X86-SSE-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fstp %st(1) @@ -4161,7 +4161,7 @@ ; X86-X87-NEXT: fstpt {{[0-9]+}}(%esp) ; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax ; X86-X87-NEXT: movl %eax, (%esp) -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; 
X86-X87-NEXT: fld %st(1) ; X86-X87-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill ; X86-X87-NEXT: fxch %st(1) @@ -4190,7 +4190,7 @@ ; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-X87-NEXT: .LBB38_6: -; X86-X87-NEXT: fldt {{\.LCPI.*}} +; X86-X87-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fstp %st(1) @@ -4258,7 +4258,7 @@ ; X86-SSE-NEXT: subl $4, %esp ; X86-SSE-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload ; X86-SSE-NEXT: xorl %ebp, %ebp -; X86-SSE-NEXT: flds {{\.LCPI.*}} +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fstp %st(1) @@ -4273,7 +4273,7 @@ ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: .LBB38_2: -; X86-SSE-NEXT: fldt {{\.LCPI.*}} +; X86-SSE-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fstp %st(1) @@ -4347,7 +4347,7 @@ ; X86-X87-NEXT: fstpt {{[0-9]+}}(%esp) ; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax ; X86-X87-NEXT: movl %eax, (%esp) -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fld %st(1) ; X86-X87-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill ; X86-X87-NEXT: fxch %st(1) @@ -4372,7 +4372,7 @@ ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB39_6: ; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill -; X86-X87-NEXT: fldt {{\.LCPI.*}} +; X86-X87-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fstp %st(1) @@ -4449,7 +4449,7 @@ ; X86-SSE-NEXT: calll __fixxfti ; X86-SSE-NEXT: subl $4, %esp ; X86-SSE-NEXT: xorl %ecx, %ecx -; X86-SSE-NEXT: flds {{\.LCPI.*}} +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fstp %st(1) @@ -4461,7 +4461,7 @@ ; X86-SSE-NEXT: cmovbl %ecx, %edi ; X86-SSE-NEXT: movl $-2147483648, %ebx # imm = 0x80000000 ; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %ebx -; X86-SSE-NEXT: fldt {{\.LCPI.*}} +; X86-SSE-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fstp %st(1) diff --git a/llvm/test/CodeGen/X86/fptoui-sat-scalar.ll b/llvm/test/CodeGen/X86/fptoui-sat-scalar.ll --- a/llvm/test/CodeGen/X86/fptoui-sat-scalar.ll +++ b/llvm/test/CodeGen/X86/fptoui-sat-scalar.ll @@ -107,7 +107,7 @@ ; X86-X87-NEXT: .LBB1_1: ; X86-X87-NEXT: xorl %ecx, %ecx ; X86-X87-NEXT: .LBB1_3: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -168,7 +168,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB2_2: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -230,7 +230,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB3_2: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -293,7 +293,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), 
%ecx ; X86-X87-NEXT: .LBB4_2: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -312,7 +312,7 @@ ; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE-NEXT: xorps %xmm1, %xmm1 ; X86-SSE-NEXT: maxss %xmm1, %xmm0 -; X86-SSE-NEXT: minss {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: minss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: cvttss2si %xmm0, %eax ; X86-SSE-NEXT: retl ; @@ -352,7 +352,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB5_2: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -381,7 +381,7 @@ ; X86-SSE-NEXT: xorps %xmm1, %xmm1 ; X86-SSE-NEXT: ucomiss %xmm1, %xmm0 ; X86-SSE-NEXT: cmovael %ecx, %edx -; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movl $-1, %eax ; X86-SSE-NEXT: cmovbel %edx, %eax ; X86-SSE-NEXT: retl @@ -407,7 +407,7 @@ ; X86-X87-NEXT: pushl %esi ; X86-X87-NEXT: subl $16, %esp ; X86-X87-NEXT: flds {{[0-9]+}}(%esp) -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: xorl %ecx, %ecx @@ -449,7 +449,7 @@ ; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-X87-NEXT: .LBB6_6: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -503,7 +503,7 @@ ; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-SSE-NEXT: .LBB6_4: -; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movl $262143, %edx # imm = 0x3FFFF ; X86-SSE-NEXT: cmovbel %eax, %edx ; X86-SSE-NEXT: movl $-1, %eax @@ -534,7 +534,7 @@ ; X86-X87-NEXT: pushl %esi ; X86-X87-NEXT: subl $20, %esp ; X86-X87-NEXT: flds {{[0-9]+}}(%esp) -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: xorl %ecx, %ecx @@ -576,7 +576,7 @@ ; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-X87-NEXT: .LBB7_6: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -627,7 +627,7 @@ ; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: .LBB7_4: -; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movl $-1, %ecx ; X86-SSE-NEXT: cmoval %ecx, %edx ; X86-SSE-NEXT: cmoval %ecx, %eax @@ -698,7 +698,7 @@ ; X86-X87-NEXT: .LBB8_6: ; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -759,7 +759,7 @@ ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: .LBB8_2: -; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movl $15, %ebx ; X86-SSE-NEXT: cmovbel %edi, %ebx ; 
X86-SSE-NEXT: movl $-1, %edi @@ -843,7 +843,7 @@ ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-X87-NEXT: .LBB9_6: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -900,7 +900,7 @@ ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: .LBB9_2: -; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movl $-1, %ebx ; X86-SSE-NEXT: cmoval %ebx, %edi ; X86-SSE-NEXT: cmoval %ebx, %edx @@ -1043,7 +1043,7 @@ ; X86-X87-NEXT: .LBB11_1: ; X86-X87-NEXT: xorl %ecx, %ecx ; X86-X87-NEXT: .LBB11_3: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -1104,7 +1104,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB12_2: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -1166,7 +1166,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB13_2: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -1229,7 +1229,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB14_2: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -1248,7 +1248,7 @@ ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; X86-SSE-NEXT: xorpd %xmm1, %xmm1 ; X86-SSE-NEXT: maxsd %xmm1, %xmm0 -; X86-SSE-NEXT: minsd {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: minsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: cvttsd2si %xmm0, %eax ; X86-SSE-NEXT: retl ; @@ -1288,7 +1288,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB15_2: -; X86-X87-NEXT: fldl {{\.LCPI.*}} +; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -1307,7 +1307,7 @@ ; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; X86-SSE-NEXT: xorpd %xmm1, %xmm1 ; X86-SSE-NEXT: maxsd %xmm1, %xmm0 -; X86-SSE-NEXT: minsd {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: minsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: cvttsd2si %xmm0, %ecx ; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero ; X86-SSE-NEXT: movapd %xmm0, %xmm2 @@ -1337,7 +1337,7 @@ ; X86-X87-NEXT: pushl %esi ; X86-X87-NEXT: subl $16, %esp ; X86-X87-NEXT: fldl {{[0-9]+}}(%esp) -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: xorl %ecx, %ecx @@ -1379,7 +1379,7 @@ ; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-X87-NEXT: .LBB16_6: -; X86-X87-NEXT: fldl {{\.LCPI.*}} +; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -1433,7 +1433,7 @@ ; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-SSE-NEXT: .LBB16_4: -; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movl 
$262143, %edx # imm = 0x3FFFF ; X86-SSE-NEXT: cmovbel %eax, %edx ; X86-SSE-NEXT: movl $-1, %eax @@ -1460,7 +1460,7 @@ ; X86-X87-NEXT: pushl %esi ; X86-X87-NEXT: subl $20, %esp ; X86-X87-NEXT: fldl {{[0-9]+}}(%esp) -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: xorl %ecx, %ecx @@ -1502,7 +1502,7 @@ ; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-X87-NEXT: .LBB17_6: -; X86-X87-NEXT: fldl {{\.LCPI.*}} +; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -1553,7 +1553,7 @@ ; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: .LBB17_4: -; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movl $-1, %ecx ; X86-SSE-NEXT: cmoval %ecx, %edx ; X86-SSE-NEXT: cmoval %ecx, %eax @@ -1624,7 +1624,7 @@ ; X86-X87-NEXT: .LBB18_6: ; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-X87-NEXT: fldl {{\.LCPI.*}} +; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -1685,7 +1685,7 @@ ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: .LBB18_2: -; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movl $15, %ebx ; X86-SSE-NEXT: cmovbel %edi, %ebx ; X86-SSE-NEXT: movl $-1, %edi @@ -1769,7 +1769,7 @@ ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-X87-NEXT: .LBB19_6: -; X86-X87-NEXT: fldl {{\.LCPI.*}} +; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -1826,7 +1826,7 @@ ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: .LBB19_2: -; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movl $-1, %ebx ; X86-SSE-NEXT: cmoval %ebx, %edi ; X86-SSE-NEXT: cmoval %ebx, %edx @@ -1983,7 +1983,7 @@ ; X86-X87-NEXT: .LBB21_1: ; X86-X87-NEXT: xorl %ecx, %ecx ; X86-X87-NEXT: .LBB21_3: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -2056,7 +2056,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB22_2: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -2130,7 +2130,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB23_2: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -2205,7 +2205,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB24_2: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -2229,7 +2229,7 @@ ; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; 
X86-SSE-NEXT: xorps %xmm1, %xmm1 ; X86-SSE-NEXT: maxss %xmm1, %xmm0 -; X86-SSE-NEXT: minss {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: minss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: cvttss2si %xmm0, %eax ; X86-SSE-NEXT: addl $12, %esp ; X86-SSE-NEXT: retl @@ -2276,7 +2276,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB25_2: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -2310,7 +2310,7 @@ ; X86-SSE-NEXT: xorps %xmm1, %xmm1 ; X86-SSE-NEXT: ucomiss %xmm1, %xmm0 ; X86-SSE-NEXT: cmovael %ecx, %edx -; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movl $-1, %eax ; X86-SSE-NEXT: cmovbel %edx, %eax ; X86-SSE-NEXT: addl $12, %esp @@ -2343,7 +2343,7 @@ ; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-X87-NEXT: movl %eax, (%esp) ; X86-X87-NEXT: calll __gnu_h2f_ieee -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fnstsw %ax @@ -2385,7 +2385,7 @@ ; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-X87-NEXT: .LBB26_6: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -2443,7 +2443,7 @@ ; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-SSE-NEXT: .LBB26_4: -; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movl $262143, %edx # imm = 0x3FFFF ; X86-SSE-NEXT: cmovbel %eax, %edx ; X86-SSE-NEXT: movl $-1, %eax @@ -2480,7 +2480,7 @@ ; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; X86-X87-NEXT: movl %eax, (%esp) ; X86-X87-NEXT: calll __gnu_h2f_ieee -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fnstsw %ax @@ -2522,7 +2522,7 @@ ; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-X87-NEXT: .LBB27_6: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -2577,7 +2577,7 @@ ; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: .LBB27_4: -; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movl $-1, %ecx ; X86-SSE-NEXT: cmoval %ecx, %edx ; X86-SSE-NEXT: cmoval %ecx, %eax @@ -2654,7 +2654,7 @@ ; X86-X87-NEXT: .LBB28_6: ; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -2721,7 +2721,7 @@ ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: .LBB28_2: -; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movl $15, %ebx ; X86-SSE-NEXT: cmovbel %edi, %ebx ; X86-SSE-NEXT: movl $-1, %edi @@ -2809,7 +2809,7 @@ ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-X87-NEXT: .LBB29_6: -; 
X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -2872,7 +2872,7 @@ ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: .LBB29_2: -; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movl $-1, %ebx ; X86-SSE-NEXT: cmoval %ebx, %edi ; X86-SSE-NEXT: cmoval %ebx, %edx @@ -3051,7 +3051,7 @@ ; X86-X87-NEXT: .LBB31_1: ; X86-X87-NEXT: xorl %ecx, %ecx ; X86-X87-NEXT: .LBB31_3: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -3083,7 +3083,7 @@ ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: fstp %st(1) ; X86-SSE-NEXT: cmovael %eax, %ecx -; X86-SSE-NEXT: flds {{\.LCPI.*}} +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucompi %st(1), %st ; X86-SSE-NEXT: fstp %st(0) @@ -3146,7 +3146,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB32_2: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -3181,7 +3181,7 @@ ; X86-SSE-NEXT: # %bb.1: ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-SSE-NEXT: .LBB32_2: -; X86-SSE-NEXT: flds {{\.LCPI.*}} +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucompi %st(1), %st ; X86-SSE-NEXT: fstp %st(0) @@ -3246,7 +3246,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB33_2: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -3281,7 +3281,7 @@ ; X86-SSE-NEXT: # %bb.1: ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-SSE-NEXT: .LBB33_2: -; X86-SSE-NEXT: flds {{\.LCPI.*}} +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucompi %st(1), %st ; X86-SSE-NEXT: fstp %st(0) @@ -3347,7 +3347,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB34_2: -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -3382,7 +3382,7 @@ ; X86-SSE-NEXT: # %bb.1: ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-SSE-NEXT: .LBB34_2: -; X86-SSE-NEXT: flds {{\.LCPI.*}} +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucompi %st(1), %st ; X86-SSE-NEXT: fstp %st(0) @@ -3447,7 +3447,7 @@ ; X86-X87-NEXT: # %bb.1: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-X87-NEXT: .LBB35_2: -; X86-X87-NEXT: fldl {{\.LCPI.*}} +; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -3482,7 +3482,7 @@ ; X86-SSE-NEXT: # %bb.1: ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-SSE-NEXT: .LBB35_2: -; X86-SSE-NEXT: fldl {{\.LCPI.*}} +; X86-SSE-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucompi %st(1), %st ; X86-SSE-NEXT: fstp %st(0) @@ -3528,7 +3528,7 @@ ; X86-X87-NEXT: pushl %esi ; X86-X87-NEXT: subl $16, %esp ; X86-X87-NEXT: fldt {{[0-9]+}}(%esp) -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds 
{{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: xorl %ecx, %ecx @@ -3570,7 +3570,7 @@ ; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-X87-NEXT: .LBB36_6: -; X86-X87-NEXT: fldl {{\.LCPI.*}} +; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -3595,7 +3595,7 @@ ; X86-SSE-NEXT: pushl %esi ; X86-SSE-NEXT: subl $16, %esp ; X86-SSE-NEXT: fldt {{[0-9]+}}(%esp) -; X86-SSE-NEXT: flds {{\.LCPI.*}} +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: xorl %eax, %eax ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: setbe %cl @@ -3625,7 +3625,7 @@ ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl %eax, %esi ; X86-SSE-NEXT: .LBB36_2: -; X86-SSE-NEXT: fldl {{\.LCPI.*}} +; X86-SSE-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucompi %st(1), %st ; X86-SSE-NEXT: fstp %st(0) @@ -3682,7 +3682,7 @@ ; X86-X87-NEXT: pushl %esi ; X86-X87-NEXT: subl $20, %esp ; X86-X87-NEXT: fldt {{[0-9]+}}(%esp) -; X86-X87-NEXT: flds {{\.LCPI.*}} +; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fucom %st(1) ; X86-X87-NEXT: fnstsw %ax ; X86-X87-NEXT: xorl %ecx, %ecx @@ -3724,7 +3724,7 @@ ; X86-X87-NEXT: # %bb.5: ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-X87-NEXT: .LBB37_6: -; X86-X87-NEXT: fldt {{\.LCPI.*}} +; X86-X87-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fxch %st(1) ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -3747,7 +3747,7 @@ ; X86-SSE-NEXT: pushl %ebx ; X86-SSE-NEXT: subl $16, %esp ; X86-SSE-NEXT: fldt {{[0-9]+}}(%esp) -; X86-SSE-NEXT: flds {{\.LCPI.*}} +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: xorl %ecx, %ecx ; X86-SSE-NEXT: fucomi %st(1), %st ; X86-SSE-NEXT: setbe %bl @@ -3777,7 +3777,7 @@ ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: movl %ecx, %edx ; X86-SSE-NEXT: .LBB37_2: -; X86-SSE-NEXT: fldt {{\.LCPI.*}} +; X86-SSE-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucompi %st(1), %st ; X86-SSE-NEXT: fstp %st(0) @@ -3869,7 +3869,7 @@ ; X86-X87-NEXT: .LBB38_6: ; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-X87-NEXT: fldt {{\.LCPI.*}} +; X86-X87-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -3933,7 +3933,7 @@ ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: .LBB38_2: -; X86-SSE-NEXT: fldt {{\.LCPI.*}} +; X86-SSE-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucompi %st(1), %st ; X86-SSE-NEXT: fstp %st(0) @@ -4028,7 +4028,7 @@ ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ebx ; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-X87-NEXT: .LBB39_6: -; X86-X87-NEXT: fldt {{\.LCPI.*}} +; X86-X87-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}} ; X86-X87-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload ; X86-X87-NEXT: fucompp ; X86-X87-NEXT: fnstsw %ax @@ -4088,7 +4088,7 @@ ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi ; X86-SSE-NEXT: .LBB39_2: -; X86-SSE-NEXT: fldt {{\.LCPI.*}} +; X86-SSE-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE-NEXT: fxch %st(1) ; X86-SSE-NEXT: fucompi %st(1), %st ; X86-SSE-NEXT: fstp %st(0) diff --git a/llvm/test/CodeGen/X86/funnel-shift-rot.ll b/llvm/test/CodeGen/X86/funnel-shift-rot.ll --- 
a/llvm/test/CodeGen/X86/funnel-shift-rot.ll +++ b/llvm/test/CodeGen/X86/funnel-shift-rot.ll @@ -130,9 +130,9 @@ define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) nounwind { ; X32-SSE2-LABEL: rotl_v4i32: ; X32-SSE2: # %bb.0: -; X32-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X32-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X32-SSE2-NEXT: pslld $23, %xmm1 -; X32-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1 +; X32-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X32-SSE2-NEXT: cvttps2dq %xmm1, %xmm1 ; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; X32-SSE2-NEXT: pmuludq %xmm1, %xmm0 @@ -324,9 +324,9 @@ ; X32-SSE2: # %bb.0: ; X32-SSE2-NEXT: pxor %xmm2, %xmm2 ; X32-SSE2-NEXT: psubd %xmm1, %xmm2 -; X32-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2 +; X32-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2 ; X32-SSE2-NEXT: pslld $23, %xmm2 -; X32-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm2 +; X32-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm2 ; X32-SSE2-NEXT: cvttps2dq %xmm2, %xmm1 ; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; X32-SSE2-NEXT: pmuludq %xmm1, %xmm0 diff --git a/llvm/test/CodeGen/X86/haddsub-broadcast.ll b/llvm/test/CodeGen/X86/haddsub-broadcast.ll --- a/llvm/test/CodeGen/X86/haddsub-broadcast.ll +++ b/llvm/test/CodeGen/X86/haddsub-broadcast.ll @@ -8,7 +8,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] -; CHECK-NEXT: vsubpd {{\.LCPI.*}}, %xmm0, %xmm0 +; CHECK-NEXT: vsubpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; CHECK-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; CHECK-NEXT: vaddsd %xmm0, %xmm1, %xmm0 ; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 diff --git a/llvm/test/CodeGen/X86/half.ll b/llvm/test/CodeGen/X86/half.ll --- a/llvm/test/CodeGen/X86/half.ll +++ b/llvm/test/CodeGen/X86/half.ll @@ -364,7 +364,7 @@ ; CHECK-I686-NEXT: movlps %xmm0, {{[0-9]+}}(%esp) ; CHECK-I686-NEXT: shrl $31, %eax ; CHECK-I686-NEXT: fildll {{[0-9]+}}(%esp) -; CHECK-I686-NEXT: fadds {{\.LCPI.*}}(,%eax,4) +; CHECK-I686-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4) ; CHECK-I686-NEXT: fstps (%esp) ; CHECK-I686-NEXT: calll __gnu_f2h_ieee ; CHECK-I686-NEXT: movw %ax, (%esi) diff --git a/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll b/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll --- a/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll +++ b/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll @@ -499,7 +499,7 @@ ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: pxor %xmm2, %xmm2 ; X86-SSE2-NEXT: pslld $23, %xmm1 -; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0 @@ -508,7 +508,7 @@ ; X86-SSE2-NEXT: pmuludq %xmm3, %xmm1 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqd %xmm2, %xmm0 ; X86-SSE2-NEXT: retl ; @@ -586,7 +586,7 @@ ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: pxor %xmm2, %xmm2 ; X86-SSE2-NEXT: pslld $23, %xmm1 -; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3] ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0 @@ -595,7 +595,7 @@ ; X86-SSE2-NEXT: pmuludq %xmm3, %xmm1 ; 
X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pcmpeqd %xmm2, %xmm0 ; X86-SSE2-NEXT: retl ; diff --git a/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll b/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll --- a/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll +++ b/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll @@ -461,7 +461,7 @@ ; X86-SSE2-LABEL: vec_4xi32_splat_eq: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: pslld $23, %xmm1 -; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] @@ -510,7 +510,7 @@ ; X86-SSE2-LABEL: vec_4xi32_nonsplat_eq: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: pslld $23, %xmm1 -; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,1,16776960,2147483648] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3] @@ -563,12 +563,12 @@ ; X86-SSE2-NEXT: movl $1, %eax ; X86-SSE2-NEXT: movd %eax, %xmm2 ; X86-SSE2-NEXT: pslld $23, %xmm1 -; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1 ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm2 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; X86-SSE2-NEXT: pmuludq {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: pmuludq {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] ; X86-SSE2-NEXT: pand %xmm2, %xmm0 @@ -611,7 +611,7 @@ ; X86-SSE2-LABEL: vec_4xi32_nonsplat_undef1_eq: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: pslld $23, %xmm1 -; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3] @@ -661,12 +661,12 @@ ; X86-SSE2-NEXT: movl $1, %eax ; X86-SSE2-NEXT: movd %eax, %xmm2 ; X86-SSE2-NEXT: pslld $23, %xmm1 -; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1 ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm2 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] -; X86-SSE2-NEXT: pmuludq {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: pmuludq {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] ; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] ; X86-SSE2-NEXT: pand %xmm2, %xmm0 diff --git a/llvm/test/CodeGen/X86/i64-to-float.ll b/llvm/test/CodeGen/X86/i64-to-float.ll --- a/llvm/test/CodeGen/X86/i64-to-float.ll +++ b/llvm/test/CodeGen/X86/i64-to-float.ll @@ -14,7 +14,7 @@ ; X86-SSE-LABEL: mask_sitofp_2i64_2f64: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: cvtdq2pd %xmm0, %xmm0 ; X86-SSE-NEXT: retl ; @@ -32,7 +32,7 @@ ; ; X86-AVX512DQ-LABEL: mask_sitofp_2i64_2f64: 
; X86-AVX512DQ: # %bb.0: -; X86-AVX512DQ-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX512DQ-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX512DQ-NEXT: vcvtqq2pd %xmm0, %xmm0 ; X86-AVX512DQ-NEXT: retl ; @@ -69,7 +69,7 @@ ; X86-SSE-LABEL: mask_uitofp_2i64_2f64: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: cvtdq2pd %xmm0, %xmm0 ; X86-SSE-NEXT: retl ; @@ -87,7 +87,7 @@ ; ; X86-AVX512DQ-LABEL: mask_uitofp_2i64_2f64: ; X86-AVX512DQ: # %bb.0: -; X86-AVX512DQ-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX512DQ-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX512DQ-NEXT: vcvtqq2pd %xmm0, %xmm0 ; X86-AVX512DQ-NEXT: retl ; @@ -124,7 +124,7 @@ ; X86-SSE-LABEL: mask_sitofp_4i64_4f32: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] -; X86-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: cvtdq2ps %xmm0, %xmm0 ; X86-SSE-NEXT: retl ; @@ -132,7 +132,7 @@ ; X86-AVX: # %bb.0: ; X86-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1 ; X86-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] -; X86-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X86-AVX-NEXT: vzeroupper ; X86-AVX-NEXT: retl @@ -140,14 +140,14 @@ ; X86-AVX512F-LABEL: mask_sitofp_4i64_4f32: ; X86-AVX512F: # %bb.0: ; X86-AVX512F-NEXT: vpmovqd %ymm0, %xmm0 -; X86-AVX512F-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX512F-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX512F-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X86-AVX512F-NEXT: vzeroupper ; X86-AVX512F-NEXT: retl ; ; X86-AVX512DQ-LABEL: mask_sitofp_4i64_4f32: ; X86-AVX512DQ: # %bb.0: -; X86-AVX512DQ-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-AVX512DQ-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-AVX512DQ-NEXT: vcvtqq2ps %ymm0, %xmm0 ; X86-AVX512DQ-NEXT: vzeroupper ; X86-AVX512DQ-NEXT: retl @@ -191,7 +191,7 @@ ; X86-SSE-LABEL: mask_uitofp_4i64_4f32: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] -; X86-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: cvtdq2ps %xmm0, %xmm0 ; X86-SSE-NEXT: retl ; @@ -199,7 +199,7 @@ ; X86-AVX: # %bb.0: ; X86-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1 ; X86-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] -; X86-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X86-AVX-NEXT: vzeroupper ; X86-AVX-NEXT: retl @@ -207,14 +207,14 @@ ; X86-AVX512F-LABEL: mask_uitofp_4i64_4f32: ; X86-AVX512F: # %bb.0: ; X86-AVX512F-NEXT: vpmovqd %ymm0, %xmm0 -; X86-AVX512F-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX512F-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX512F-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X86-AVX512F-NEXT: vzeroupper ; X86-AVX512F-NEXT: retl ; ; X86-AVX512DQ-LABEL: mask_uitofp_4i64_4f32: ; X86-AVX512DQ: # %bb.0: -; X86-AVX512DQ-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-AVX512DQ-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-AVX512DQ-NEXT: vcvtqq2ps %ymm0, %xmm0 ; X86-AVX512DQ-NEXT: vzeroupper ; X86-AVX512DQ-NEXT: retl @@ -270,7 +270,7 @@ ; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3] ; X86-SSE-NEXT: por %xmm2, %xmm3 ; X86-SSE-NEXT: pand %xmm3, %xmm0 -; X86-SSE-NEXT: pandn {{\.LCPI.*}}, %xmm3 
+; X86-SSE-NEXT: pandn {{\.LCPI[0-9]+_[0-9]+}}, %xmm3 ; X86-SSE-NEXT: por %xmm0, %xmm3 ; X86-SSE-NEXT: pxor %xmm3, %xmm1 ; X86-SSE-NEXT: movdqa {{.*#+}} xmm0 = [2147483903,0,2147483903,0] @@ -283,7 +283,7 @@ ; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3] ; X86-SSE-NEXT: por %xmm0, %xmm1 ; X86-SSE-NEXT: pand %xmm1, %xmm3 -; X86-SSE-NEXT: pandn {{\.LCPI.*}}, %xmm1 +; X86-SSE-NEXT: pandn {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE-NEXT: por %xmm3, %xmm1 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3] ; X86-SSE-NEXT: cvtdq2pd %xmm0, %xmm0 @@ -305,16 +305,16 @@ ; ; X86-AVX512F-LABEL: clamp_sitofp_2i64_2f64: ; X86-AVX512F: # %bb.0: -; X86-AVX512F-NEXT: vpmaxsq {{\.LCPI.*}}, %xmm0, %xmm0 -; X86-AVX512F-NEXT: vpminsq {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX512F-NEXT: vpmaxsq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 +; X86-AVX512F-NEXT: vpminsq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] ; X86-AVX512F-NEXT: vcvtdq2pd %xmm0, %xmm0 ; X86-AVX512F-NEXT: retl ; ; X86-AVX512DQ-LABEL: clamp_sitofp_2i64_2f64: ; X86-AVX512DQ: # %bb.0: -; X86-AVX512DQ-NEXT: vpmaxsq {{\.LCPI.*}}, %xmm0, %xmm0 -; X86-AVX512DQ-NEXT: vpminsq {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX512DQ-NEXT: vpmaxsq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 +; X86-AVX512DQ-NEXT: vpminsq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX512DQ-NEXT: vcvtqq2pd %xmm0, %xmm0 ; X86-AVX512DQ-NEXT: retl ; diff --git a/llvm/test/CodeGen/X86/insert-into-constant-vector.ll b/llvm/test/CodeGen/X86/insert-into-constant-vector.ll --- a/llvm/test/CodeGen/X86/insert-into-constant-vector.ll +++ b/llvm/test/CodeGen/X86/insert-into-constant-vector.ll @@ -16,7 +16,7 @@ ; X86-SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero ; X86-SSE2-NEXT: movaps {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] ; X86-SSE2-NEXT: andnps %xmm1, %xmm0 -; X86-SSE2-NEXT: orps {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: orps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X64-SSE2-LABEL: elt0_v16i8: @@ -393,7 +393,7 @@ ; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; X86-AVX1-NEXT: vmovaps {{.*#+}} xmm1 = [4,0,0,0] ; X86-AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; X86-AVX1-NEXT: vinsertf128 $1, {{\.LCPI.*}}, %ymm0, %ymm1 +; X86-AVX1-NEXT: vinsertf128 $1, {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm1 ; X86-AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [42,0,1,0,2,0,3,0] ; X86-AVX1-NEXT: retl ; @@ -410,7 +410,7 @@ ; X86-AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; X86-AVX2-NEXT: vmovaps {{.*#+}} xmm1 = [4,0,0,0] ; X86-AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0] -; X86-AVX2-NEXT: vinsertf128 $1, {{\.LCPI.*}}, %ymm0, %ymm1 +; X86-AVX2-NEXT: vinsertf128 $1, {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm1 ; X86-AVX2-NEXT: vmovaps {{.*#+}} ymm0 = [42,0,1,0,2,0,3,0] ; X86-AVX2-NEXT: retl ; @@ -428,7 +428,7 @@ ; X86-AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero ; X86-AVX512F-NEXT: vmovaps {{.*#+}} xmm2 = [4,0,0,0] ; X86-AVX512F-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0] -; X86-AVX512F-NEXT: vinsertf128 $1, {{\.LCPI.*}}, %ymm1, %ymm1 +; X86-AVX512F-NEXT: vinsertf128 $1, {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1 ; X86-AVX512F-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 ; X86-AVX512F-NEXT: retl ; diff --git a/llvm/test/CodeGen/X86/insertelement-var-index.ll b/llvm/test/CodeGen/X86/insertelement-var-index.ll --- a/llvm/test/CodeGen/X86/insertelement-var-index.ll +++ b/llvm/test/CodeGen/X86/insertelement-var-index.ll @@ -1380,7 +1380,7 @@ ; AVX1-NEXT: vmovd %edi, %xmm2 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] 
; AVX1-NEXT: vpcmpeqd {{.*}}(%rip), %xmm2, %xmm3 -; AVX1-NEXT: vpcmpeqd {{\.LCPI.*}}+{{.*}}(%rip), %xmm2, %xmm2 +; AVX1-NEXT: vpcmpeqd {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm2, %xmm2 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 ; AVX1-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0 ; AVX1-NEXT: retq @@ -1424,7 +1424,7 @@ ; AVX1-NEXT: vmovq %rax, %xmm2 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1] ; AVX1-NEXT: vpcmpeqq {{.*}}(%rip), %xmm2, %xmm3 -; AVX1-NEXT: vpcmpeqq {{\.LCPI.*}}+{{.*}}(%rip), %xmm2, %xmm2 +; AVX1-NEXT: vpcmpeqq {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm2, %xmm2 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2 ; AVX1-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0 ; AVX1-NEXT: retq @@ -1661,7 +1661,7 @@ ; AVX1-NEXT: vmovd %esi, %xmm1 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] ; AVX1-NEXT: vpcmpeqd {{.*}}(%rip), %xmm1, %xmm2 -; AVX1-NEXT: vpcmpeqd {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpcmpeqd {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 ; AVX1-NEXT: vbroadcastss (%rdi), %ymm2 ; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0 @@ -1706,7 +1706,7 @@ ; AVX1-NEXT: vmovq %rax, %xmm1 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1] ; AVX1-NEXT: vpcmpeqq {{.*}}(%rip), %xmm1, %xmm2 -; AVX1-NEXT: vpcmpeqq {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpcmpeqq {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 ; AVX1-NEXT: vbroadcastsd (%rdi), %ymm2 ; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0 diff --git a/llvm/test/CodeGen/X86/known-bits-vector.ll b/llvm/test/CodeGen/X86/known-bits-vector.ll --- a/llvm/test/CodeGen/X86/known-bits-vector.ll +++ b/llvm/test/CodeGen/X86/known-bits-vector.ll @@ -74,7 +74,7 @@ define <4 x i32> @knownbits_mask_shuffle_sext(<8 x i16> %a0) nounwind { ; X32-LABEL: knownbits_mask_shuffle_sext: ; X32: # %bb.0: -; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; X32-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; X32-NEXT: retl @@ -94,7 +94,7 @@ define <4 x i32> @knownbits_mask_shuffle_shuffle_sext(<8 x i16> %a0) nounwind { ; X32-LABEL: knownbits_mask_shuffle_shuffle_sext: ; X32: # %bb.0: -; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; X32-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; X32-NEXT: retl @@ -115,7 +115,7 @@ define <4 x i32> @knownbits_mask_shuffle_shuffle_undef_sext(<8 x i16> %a0) nounwind { ; X32-LABEL: knownbits_mask_shuffle_shuffle_undef_sext: ; X32: # %bb.0: -; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; X32-NEXT: vpmovsxwd %xmm0, %xmm0 ; X32-NEXT: retl @@ -136,7 +136,7 @@ define <4 x float> @knownbits_mask_shuffle_uitofp(<4 x i32> %a0) nounwind { ; X32-LABEL: knownbits_mask_shuffle_uitofp: ; X32: # %bb.0: -; X32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 +; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3] ; X32-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X32-NEXT: retl @@ -173,8 +173,8 @@ define <4 x float> @knownbits_mask_xor_shuffle_uitofp(<4 x i32> %a0) nounwind { ; X32-LABEL: knownbits_mask_xor_shuffle_uitofp: ; X32: # %bb.0: -; X32-NEXT: vandps {{\.LCPI.*}}, %xmm0, 
%xmm0 -; X32-NEXT: vxorps {{\.LCPI.*}}, %xmm0, %xmm0 +; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 +; X32-NEXT: vxorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3] ; X32-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X32-NEXT: retl @@ -384,8 +384,8 @@ define <8 x float> @knownbits_mask_concat_uitofp(<4 x i32> %a0, <4 x i32> %a1) nounwind { ; X32-LABEL: knownbits_mask_concat_uitofp: ; X32: # %bb.0: -; X32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 -; X32-NEXT: vandps {{\.LCPI.*}}, %xmm1, %xmm1 +; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 +; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 ; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,0,2] ; X32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[1,3,1,3] ; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 @@ -432,8 +432,8 @@ define <4 x float> @knownbits_smax_smin_shuffle_uitofp(<4 x i32> %a0) { ; X32-LABEL: knownbits_smax_smin_shuffle_uitofp: ; X32: # %bb.0: -; X32-NEXT: vpminsd {{\.LCPI.*}}, %xmm0, %xmm0 -; X32-NEXT: vpmaxsd {{\.LCPI.*}}, %xmm0, %xmm0 +; X32-NEXT: vpminsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 +; X32-NEXT: vpmaxsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3] ; X32-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X32-NEXT: retl @@ -457,7 +457,7 @@ define <4 x float> @knownbits_umin_shuffle_uitofp(<4 x i32> %a0) { ; X32-LABEL: knownbits_umin_shuffle_uitofp: ; X32: # %bb.0: -; X32-NEXT: vpminud {{\.LCPI.*}}, %xmm0, %xmm0 +; X32-NEXT: vpminud {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3] ; X32-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X32-NEXT: retl @@ -495,8 +495,8 @@ define <4 x float> @knownbits_mask_umax_shuffle_uitofp(<4 x i32> %a0) { ; X32-LABEL: knownbits_mask_umax_shuffle_uitofp: ; X32: # %bb.0: -; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 -; X32-NEXT: vpmaxud {{\.LCPI.*}}, %xmm0, %xmm0 +; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 +; X32-NEXT: vpmaxud {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3] ; X32-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X32-NEXT: retl @@ -540,7 +540,7 @@ ; X32-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7] ; X32-NEXT: vpsrld $16, %xmm0, %xmm0 ; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7] -; X32-NEXT: vsubps {{\.LCPI.*}}, %xmm0, %xmm0 +; X32-NEXT: vsubps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X32-NEXT: vaddps %xmm0, %xmm1, %xmm0 ; X32-NEXT: retl ; @@ -563,7 +563,7 @@ define <4 x float> @knownbits_or_abs_uitofp(<4 x i32> %a0) { ; X32-LABEL: knownbits_or_abs_uitofp: ; X32: # %bb.0: -; X32-NEXT: vpor {{\.LCPI.*}}, %xmm0, %xmm0 +; X32-NEXT: vpor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,2] ; X32-NEXT: vpabsd %xmm0, %xmm0 ; X32-NEXT: vcvtdq2ps %xmm0, %xmm0 @@ -593,8 +593,8 @@ ; X32-NEXT: andl $-16, %esp ; X32-NEXT: subl $16, %esp ; X32-NEXT: vmovaps 8(%ebp), %xmm3 -; X32-NEXT: vandps {{\.LCPI.*}}, %xmm2, %xmm2 -; X32-NEXT: vandps {{\.LCPI.*}}, %xmm3, %xmm3 +; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm2, %xmm2 +; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm3, %xmm3 ; X32-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; X32-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0 ; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,2] @@ -630,7 +630,7 @@ ; X32-NEXT: subl $16, %esp ; X32-NEXT: vmovaps 8(%ebp), %xmm3 ; X32-NEXT: vpsrld $5, %xmm2, %xmm2 -; X32-NEXT: vandps {{\.LCPI.*}}, %xmm3, %xmm3 +; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm3, %xmm3 
; X32-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 ; X32-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0 ; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,2] diff --git a/llvm/test/CodeGen/X86/known-signbits-vector.ll b/llvm/test/CodeGen/X86/known-signbits-vector.ll --- a/llvm/test/CodeGen/X86/known-signbits-vector.ll +++ b/llvm/test/CodeGen/X86/known-signbits-vector.ll @@ -515,7 +515,7 @@ ; X86-NEXT: vpsrad $25, %xmm1, %xmm1 ; X86-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0 ; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] -; X86-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-NEXT: retl ; ; X64-AVX1-LABEL: signbits_mask_ashr_smax: @@ -553,7 +553,7 @@ ; X86-NEXT: vpsrad $25, %xmm1, %xmm1 ; X86-NEXT: vpminsd %xmm1, %xmm0, %xmm0 ; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] -; X86-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-NEXT: retl ; ; X64-AVX1-LABEL: signbits_mask_ashr_smin: @@ -591,7 +591,7 @@ ; X86-NEXT: vpsrad $25, %xmm1, %xmm1 ; X86-NEXT: vpmaxud %xmm1, %xmm0, %xmm0 ; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] -; X86-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-NEXT: retl ; ; X64-AVX1-LABEL: signbits_mask_ashr_umax: @@ -629,7 +629,7 @@ ; X86-NEXT: vpsrad $25, %xmm1, %xmm1 ; X86-NEXT: vpminud %xmm1, %xmm0, %xmm0 ; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] -; X86-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-NEXT: retl ; ; X64-AVX1-LABEL: signbits_mask_ashr_umin: @@ -674,7 +674,7 @@ ; X86-NEXT: vpcmpeqb %xmm3, %xmm0, %xmm0 ; X86-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; X86-NEXT: vandnps %ymm1, %ymm0, %ymm1 -; X86-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-NEXT: vorps %ymm1, %ymm0, %ymm0 ; X86-NEXT: vmovaps %ymm0, (%eax) ; X86-NEXT: vzeroupper diff --git a/llvm/test/CodeGen/X86/limited-prec.ll b/llvm/test/CodeGen/X86/limited-prec.ll --- a/llvm/test/CodeGen/X86/limited-prec.ll +++ b/llvm/test/CodeGen/X86/limited-prec.ll @@ -8,7 +8,7 @@ ; precision6: # %bb.0: # %entry ; precision6-NEXT: subl $20, %esp ; precision6-NEXT: flds {{[0-9]+}}(%esp) -; precision6-NEXT: fmuls {{\.LCPI.*}} +; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} ; precision6-NEXT: fnstcw (%esp) ; precision6-NEXT: movzwl (%esp), %eax ; precision6-NEXT: orl $3072, %eax # imm = 0xC00 @@ -20,10 +20,10 @@ ; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision6-NEXT: fisubl {{[0-9]+}}(%esp) ; precision6-NEXT: fld %st(0) -; precision6-NEXT: fmuls {{\.LCPI.*}} -; precision6-NEXT: fadds {{\.LCPI.*}} +; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} +; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision6-NEXT: fmulp %st, %st(1) -; precision6-NEXT: fadds {{\.LCPI.*}} +; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision6-NEXT: fstps {{[0-9]+}}(%esp) ; precision6-NEXT: shll $23, %eax ; precision6-NEXT: addl {{[0-9]+}}(%esp), %eax @@ -36,7 +36,7 @@ ; precision12: # %bb.0: # %entry ; precision12-NEXT: subl $20, %esp ; precision12-NEXT: flds {{[0-9]+}}(%esp) -; precision12-NEXT: fmuls {{\.LCPI.*}} +; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fnstcw (%esp) ; precision12-NEXT: movzwl (%esp), %eax ; precision12-NEXT: orl $3072, %eax # imm = 0xC00 @@ -48,12 +48,12 @@ ; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision12-NEXT: fisubl {{[0-9]+}}(%esp) ; precision12-NEXT: fld %st(0) -; precision12-NEXT: fmuls 
{{\.LCPI.*}} -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fmul %st(1), %st -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fmulp %st, %st(1) -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fstps {{[0-9]+}}(%esp) ; precision12-NEXT: shll $23, %eax ; precision12-NEXT: addl {{[0-9]+}}(%esp), %eax @@ -66,7 +66,7 @@ ; precision18: # %bb.0: # %entry ; precision18-NEXT: subl $20, %esp ; precision18-NEXT: flds {{[0-9]+}}(%esp) -; precision18-NEXT: fmuls {{\.LCPI.*}} +; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fnstcw (%esp) ; precision18-NEXT: movzwl (%esp), %eax ; precision18-NEXT: orl $3072, %eax # imm = 0xC00 @@ -78,16 +78,16 @@ ; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision18-NEXT: fisubl {{[0-9]+}}(%esp) ; precision18-NEXT: fld %st(0) -; precision18-NEXT: fmuls {{\.LCPI.*}} -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmulp %st, %st(1) ; precision18-NEXT: fld1 ; precision18-NEXT: faddp %st, %st(1) @@ -122,10 +122,10 @@ ; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision6-NEXT: fisubl {{[0-9]+}}(%esp) ; precision6-NEXT: fld %st(0) -; precision6-NEXT: fmuls {{\.LCPI.*}} -; precision6-NEXT: fadds {{\.LCPI.*}} +; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} +; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision6-NEXT: fmulp %st, %st(1) -; precision6-NEXT: fadds {{\.LCPI.*}} +; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision6-NEXT: fstps {{[0-9]+}}(%esp) ; precision6-NEXT: shll $23, %eax ; precision6-NEXT: addl {{[0-9]+}}(%esp), %eax @@ -149,12 +149,12 @@ ; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision12-NEXT: fisubl {{[0-9]+}}(%esp) ; precision12-NEXT: fld %st(0) -; precision12-NEXT: fmuls {{\.LCPI.*}} -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fmul %st(1), %st -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fmulp %st, %st(1) -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fstps {{[0-9]+}}(%esp) ; precision12-NEXT: shll $23, %eax ; precision12-NEXT: addl {{[0-9]+}}(%esp), %eax @@ -178,16 +178,16 @@ ; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision18-NEXT: fisubl {{[0-9]+}}(%esp) ; precision18-NEXT: fld %st(0) -; precision18-NEXT: fmuls {{\.LCPI.*}} -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; 
precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmulp %st, %st(1) ; precision18-NEXT: fld1 ; precision18-NEXT: faddp %st, %st(1) @@ -211,7 +211,7 @@ ; precision6: # %bb.0: # %entry ; precision6-NEXT: subl $20, %esp ; precision6-NEXT: flds {{[0-9]+}}(%esp) -; precision6-NEXT: fmuls {{\.LCPI.*}} +; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} ; precision6-NEXT: fnstcw (%esp) ; precision6-NEXT: movzwl (%esp), %eax ; precision6-NEXT: orl $3072, %eax # imm = 0xC00 @@ -223,10 +223,10 @@ ; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision6-NEXT: fisubl {{[0-9]+}}(%esp) ; precision6-NEXT: fld %st(0) -; precision6-NEXT: fmuls {{\.LCPI.*}} -; precision6-NEXT: fadds {{\.LCPI.*}} +; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} +; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision6-NEXT: fmulp %st, %st(1) -; precision6-NEXT: fadds {{\.LCPI.*}} +; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision6-NEXT: fstps {{[0-9]+}}(%esp) ; precision6-NEXT: shll $23, %eax ; precision6-NEXT: addl {{[0-9]+}}(%esp), %eax @@ -239,7 +239,7 @@ ; precision12: # %bb.0: # %entry ; precision12-NEXT: subl $20, %esp ; precision12-NEXT: flds {{[0-9]+}}(%esp) -; precision12-NEXT: fmuls {{\.LCPI.*}} +; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fnstcw (%esp) ; precision12-NEXT: movzwl (%esp), %eax ; precision12-NEXT: orl $3072, %eax # imm = 0xC00 @@ -251,12 +251,12 @@ ; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision12-NEXT: fisubl {{[0-9]+}}(%esp) ; precision12-NEXT: fld %st(0) -; precision12-NEXT: fmuls {{\.LCPI.*}} -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fmul %st(1), %st -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fmulp %st, %st(1) -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fstps {{[0-9]+}}(%esp) ; precision12-NEXT: shll $23, %eax ; precision12-NEXT: addl {{[0-9]+}}(%esp), %eax @@ -269,7 +269,7 @@ ; precision18: # %bb.0: # %entry ; precision18-NEXT: subl $20, %esp ; precision18-NEXT: flds {{[0-9]+}}(%esp) -; precision18-NEXT: fmuls {{\.LCPI.*}} +; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fnstcw (%esp) ; precision18-NEXT: movzwl (%esp), %eax ; precision18-NEXT: orl $3072, %eax # imm = 0xC00 @@ -281,16 +281,16 @@ ; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision18-NEXT: fisubl {{[0-9]+}}(%esp) ; precision18-NEXT: fld %st(0) -; precision18-NEXT: fmuls {{\.LCPI.*}} -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; 
precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmulp %st, %st(1) ; precision18-NEXT: fld1 ; precision18-NEXT: faddp %st, %st(1) @@ -324,12 +324,12 @@ ; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision6-NEXT: flds (%esp) ; precision6-NEXT: fld %st(0) -; precision6-NEXT: fmuls {{\.LCPI.*}} -; precision6-NEXT: fadds {{\.LCPI.*}} +; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} +; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision6-NEXT: fmulp %st, %st(1) -; precision6-NEXT: fadds {{\.LCPI.*}} +; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision6-NEXT: fildl {{[0-9]+}}(%esp) -; precision6-NEXT: fmuls {{\.LCPI.*}} +; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} ; precision6-NEXT: faddp %st, %st(1) ; precision6-NEXT: addl $8, %esp ; precision6-NEXT: retl @@ -348,16 +348,16 @@ ; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision12-NEXT: flds (%esp) ; precision12-NEXT: fld %st(0) -; precision12-NEXT: fmuls {{\.LCPI.*}} -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fmul %st(1), %st -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fmul %st(1), %st -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fmulp %st, %st(1) -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fildl {{[0-9]+}}(%esp) -; precision12-NEXT: fmuls {{\.LCPI.*}} +; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: faddp %st, %st(1) ; precision12-NEXT: addl $8, %esp ; precision12-NEXT: retl @@ -376,20 +376,20 @@ ; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision18-NEXT: flds (%esp) ; precision18-NEXT: fld %st(0) -; precision18-NEXT: fmuls {{\.LCPI.*}} -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmulp %st, %st(1) -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fildl {{[0-9]+}}(%esp) -; precision18-NEXT: fmuls {{\.LCPI.*}} +; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: faddp %st, %st(1) ; precision18-NEXT: addl $8, %esp ; precision18-NEXT: retl @@ -416,10 +416,10 @@ ; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision6-NEXT: flds (%esp) ; precision6-NEXT: fld %st(0) -; precision6-NEXT: fmuls {{\.LCPI.*}} -; precision6-NEXT: fadds {{\.LCPI.*}} +; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} +; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision6-NEXT: fmulp %st, %st(1) -; precision6-NEXT: fadds {{\.LCPI.*}} +; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision6-NEXT: fiaddl {{[0-9]+}}(%esp) ; precision6-NEXT: addl $8, %esp ; precision6-NEXT: retl @@ -438,14 +438,14 @@ ; precision12-NEXT: movl %eax, 
{{[0-9]+}}(%esp) ; precision12-NEXT: flds (%esp) ; precision12-NEXT: fld %st(0) -; precision12-NEXT: fmuls {{\.LCPI.*}} -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fmul %st(1), %st -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fmul %st(1), %st -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fmulp %st, %st(1) -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fiaddl {{[0-9]+}}(%esp) ; precision12-NEXT: addl $8, %esp ; precision12-NEXT: retl @@ -464,18 +464,18 @@ ; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision18-NEXT: flds (%esp) ; precision18-NEXT: fld %st(0) -; precision18-NEXT: fmuls {{\.LCPI.*}} -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmulp %st, %st(1) -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fiaddl {{[0-9]+}}(%esp) ; precision18-NEXT: addl $8, %esp ; precision18-NEXT: retl @@ -502,12 +502,12 @@ ; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision6-NEXT: flds (%esp) ; precision6-NEXT: fld %st(0) -; precision6-NEXT: fmuls {{\.LCPI.*}} -; precision6-NEXT: fadds {{\.LCPI.*}} +; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} +; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision6-NEXT: fmulp %st, %st(1) -; precision6-NEXT: fadds {{\.LCPI.*}} +; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision6-NEXT: fildl {{[0-9]+}}(%esp) -; precision6-NEXT: fmuls {{\.LCPI.*}} +; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} ; precision6-NEXT: faddp %st, %st(1) ; precision6-NEXT: addl $8, %esp ; precision6-NEXT: retl @@ -526,14 +526,14 @@ ; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision12-NEXT: flds (%esp) ; precision12-NEXT: fld %st(0) -; precision12-NEXT: fmuls {{\.LCPI.*}} -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fmul %st(1), %st -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fmulp %st, %st(1) -; precision12-NEXT: fadds {{\.LCPI.*}} +; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: fildl {{[0-9]+}}(%esp) -; precision12-NEXT: fmuls {{\.LCPI.*}} +; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} ; precision12-NEXT: faddp %st, %st(1) ; precision12-NEXT: addl $8, %esp ; precision12-NEXT: retl @@ -552,18 +552,18 @@ ; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp) ; precision18-NEXT: flds (%esp) ; precision18-NEXT: fld %st(0) -; precision18-NEXT: fmuls {{\.LCPI.*}} -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} +; 
precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmul %st(1), %st -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fmulp %st, %st(1) -; precision18-NEXT: fadds {{\.LCPI.*}} +; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: fildl {{[0-9]+}}(%esp) -; precision18-NEXT: fmuls {{\.LCPI.*}} +; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} ; precision18-NEXT: faddp %st, %st(1) ; precision18-NEXT: addl $8, %esp ; precision18-NEXT: retl diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll --- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll +++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll @@ -517,13 +517,13 @@ ; SKX_LARGE: # %bb.0: # %entry ; SKX_LARGE-NEXT: vpbroadcastq %rdi, %zmm2 ; SKX_LARGE-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero -; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax +; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax ; SKX_LARGE-NEXT: vpmuldq (%rax){1to8}, %zmm1, %zmm1 -; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax +; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax ; SKX_LARGE-NEXT: vpmullq (%rax){1to8}, %zmm0, %zmm0 ; SKX_LARGE-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; SKX_LARGE-NEXT: vpaddq %zmm0, %zmm2, %zmm0 -; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax +; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax ; SKX_LARGE-NEXT: vpaddq (%rax){1to8}, %zmm0, %zmm1 ; SKX_LARGE-NEXT: kxnorw %k0, %k0, %k1 ; SKX_LARGE-NEXT: vpgatherqd (,%zmm1), %ymm0 {%k1} @@ -531,12 +531,12 @@ ; ; SKX_32-LABEL: test9: ; SKX_32: # %bb.0: # %entry -; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm1, %ymm1 +; SKX_32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm1, %ymm1 ; SKX_32-NEXT: vpmovqd %zmm0, %ymm0 -; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm0, %ymm0 +; SKX_32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0 ; SKX_32-NEXT: vpaddd {{[0-9]+}}(%esp){1to8}, %ymm0, %ymm0 ; SKX_32-NEXT: vpaddd %ymm1, %ymm0, %ymm0 -; SKX_32-NEXT: vpaddd {{\.LCPI.*}}{1to8}, %ymm0, %ymm1 +; SKX_32-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm1 ; SKX_32-NEXT: kxnorw %k0, %k0, %k1 ; SKX_32-NEXT: vpgatherdd (,%ymm1), %ymm0 {%k1} ; SKX_32-NEXT: retl @@ -603,13 +603,13 @@ ; SKX_LARGE: # %bb.0: # %entry ; SKX_LARGE-NEXT: vpbroadcastq %rdi, %zmm2 ; SKX_LARGE-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero -; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax +; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax ; SKX_LARGE-NEXT: vpmuldq (%rax){1to8}, %zmm1, %zmm1 -; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax +; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax ; SKX_LARGE-NEXT: vpmullq (%rax){1to8}, %zmm0, %zmm0 ; SKX_LARGE-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; SKX_LARGE-NEXT: vpaddq %zmm0, %zmm2, %zmm0 -; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax +; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax ; SKX_LARGE-NEXT: vpaddq (%rax){1to8}, %zmm0, %zmm1 ; SKX_LARGE-NEXT: kxnorw %k0, %k0, %k1 ; SKX_LARGE-NEXT: vpgatherqd (,%zmm1), %ymm0 {%k1} @@ -617,12 +617,12 @@ ; ; SKX_32-LABEL: test10: ; SKX_32: # 
%bb.0: # %entry -; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm1, %ymm1 +; SKX_32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm1, %ymm1 ; SKX_32-NEXT: vpmovqd %zmm0, %ymm0 -; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm0, %ymm0 +; SKX_32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0 ; SKX_32-NEXT: vpaddd {{[0-9]+}}(%esp){1to8}, %ymm0, %ymm0 ; SKX_32-NEXT: vpaddd %ymm1, %ymm0, %ymm0 -; SKX_32-NEXT: vpaddd {{\.LCPI.*}}{1to8}, %ymm0, %ymm1 +; SKX_32-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm1 ; SKX_32-NEXT: kxnorw %k0, %k0, %k1 ; SKX_32-NEXT: vpgatherdd (,%ymm1), %ymm0 {%k1} ; SKX_32-NEXT: retl @@ -2893,7 +2893,7 @@ ; KNL_32-LABEL: zext_index: ; KNL_32: # %bb.0: ; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax -; KNL_32-NEXT: vpandd {{\.LCPI.*}}{1to16}, %zmm0, %zmm1 +; KNL_32-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm1 ; KNL_32-NEXT: kxnorw %k0, %k0, %k1 ; KNL_32-NEXT: vgatherdps (%eax,%zmm1,4), %zmm0 {%k1} ; KNL_32-NEXT: retl @@ -2907,7 +2907,7 @@ ; ; SKX_LARGE-LABEL: zext_index: ; SKX_LARGE: # %bb.0: -; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax +; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax ; SKX_LARGE-NEXT: vandps (%rax){1to16}, %zmm0, %zmm1 ; SKX_LARGE-NEXT: kxnorw %k0, %k0, %k1 ; SKX_LARGE-NEXT: vgatherdps (%rdi,%zmm1,4), %zmm0 {%k1} @@ -2916,7 +2916,7 @@ ; SKX_32-LABEL: zext_index: ; SKX_32: # %bb.0: ; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax -; SKX_32-NEXT: vandps {{\.LCPI.*}}{1to16}, %zmm0, %zmm1 +; SKX_32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm1 ; SKX_32-NEXT: kxnorw %k0, %k0, %k1 ; SKX_32-NEXT: vgatherdps (%eax,%zmm1,4), %zmm0 {%k1} ; SKX_32-NEXT: retl @@ -3184,7 +3184,7 @@ ; SKX_LARGE: # %bb.0: ; SKX_LARGE-NEXT: vpsllq $63, %xmm0, %xmm0 ; SKX_LARGE-NEXT: vpmovq2m %xmm0, %k1 -; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax +; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax ; SKX_LARGE-NEXT: vmovdqa (%rax), %xmm1 ; SKX_LARGE-NEXT: vpxor %xmm0, %xmm0, %xmm0 ; SKX_LARGE-NEXT: vpgatherdq (%rdi,%xmm1,8), %xmm0 {%k1} @@ -3241,7 +3241,7 @@ ; SKX_LARGE-NEXT: vpmovsxbd %xmm0, %zmm0 ; SKX_LARGE-NEXT: vpslld $31, %zmm0, %zmm0 ; SKX_LARGE-NEXT: vpmovd2m %zmm0, %k1 -; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax +; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax ; SKX_LARGE-NEXT: vmovdqa64 (%rax), %zmm1 ; SKX_LARGE-NEXT: vpxor %xmm0, %xmm0, %xmm0 ; SKX_LARGE-NEXT: vpgatherdd (%rdi,%zmm1,4), %zmm0 {%k1} @@ -3300,7 +3300,7 @@ ; SKX_LARGE: # %bb.0: ; SKX_LARGE-NEXT: vpsllq $63, %xmm0, %xmm0 ; SKX_LARGE-NEXT: vpmovq2m %xmm0, %k1 -; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax +; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax ; SKX_LARGE-NEXT: vmovdqa (%rax), %xmm0 ; SKX_LARGE-NEXT: vpscatterdd %xmm1, (%rdi,%xmm0,4) {%k1} ; SKX_LARGE-NEXT: retq @@ -3355,7 +3355,7 @@ ; SKX_LARGE-NEXT: vpmovsxbd %xmm0, %zmm0 ; SKX_LARGE-NEXT: vpslld $31, %zmm0, %zmm0 ; SKX_LARGE-NEXT: vpmovd2m %zmm0, %k1 -; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax +; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax ; SKX_LARGE-NEXT: vmovdqa64 (%rax), %zmm0 ; SKX_LARGE-NEXT: vpscatterdd %zmm1, (%rdi,%zmm0,4) {%k1} ; SKX_LARGE-NEXT: vzeroupper @@ -3506,7 +3506,7 @@ ; ; SKX_LARGE-LABEL: pr45906: ; SKX_LARGE: # %bb.0: # %bb -; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax +; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax ; SKX_LARGE-NEXT: vpaddq (%rax){1to8}, %zmm0, %zmm1 ; SKX_LARGE-NEXT: kxnorw %k0, %k0, %k1 ; SKX_LARGE-NEXT: vpgatherqq (,%zmm1), %zmm0 {%k1} @@ -3514,7 +3514,7 @@ ; ; SKX_32-LABEL: pr45906: ; SKX_32: # %bb.0: # %bb -; 
SKX_32-NEXT: vpaddd {{\.LCPI.*}}{1to8}, %ymm0, %ymm1 +; SKX_32-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm1 ; SKX_32-NEXT: kxnorw %k0, %k0, %k1 ; SKX_32-NEXT: vpgatherdq (,%ymm1), %zmm0 {%k1} ; SKX_32-NEXT: retl diff --git a/llvm/test/CodeGen/X86/memcmp-minsize.ll b/llvm/test/CodeGen/X86/memcmp-minsize.ll --- a/llvm/test/CodeGen/X86/memcmp-minsize.ll +++ b/llvm/test/CodeGen/X86/memcmp-minsize.ll @@ -456,7 +456,7 @@ ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movdqu (%eax), %xmm0 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: sete %al diff --git a/llvm/test/CodeGen/X86/memcmp-more-load-pairs.ll b/llvm/test/CodeGen/X86/memcmp-more-load-pairs.ll --- a/llvm/test/CodeGen/X86/memcmp-more-load-pairs.ll +++ b/llvm/test/CodeGen/X86/memcmp-more-load-pairs.ll @@ -1480,7 +1480,7 @@ ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movdqu (%eax), %xmm0 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: sete %al @@ -1490,7 +1490,7 @@ ; X86-SSE41: # %bb.0: ; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE41-NEXT: movdqu (%eax), %xmm0 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: sete %al ; X86-SSE41-NEXT: retl @@ -1823,8 +1823,8 @@ ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu 8(%eax), %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF @@ -1836,8 +1836,8 @@ ; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE41-NEXT: movdqu (%eax), %xmm0 ; X86-SSE41-NEXT: movdqu 8(%eax), %xmm1 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE41-NEXT: por %xmm1, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: setne %al @@ -2312,8 +2312,8 @@ ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu 15(%eax), %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF @@ -2325,8 +2325,8 @@ ; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE41-NEXT: movdqu (%eax), %xmm0 ; X86-SSE41-NEXT: movdqu 15(%eax), %xmm1 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE41-NEXT: por %xmm1, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: setne %al @@ -2816,8 +2816,8 @@ ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: 
movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF @@ -2829,8 +2829,8 @@ ; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE41-NEXT: movdqu (%eax), %xmm0 ; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE41-NEXT: por %xmm1, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: setne %al @@ -3293,9 +3293,9 @@ ; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1 ; X86-SSE2-NEXT: movdqu 32(%eax), %xmm2 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm2 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm2 ; X86-SSE2-NEXT: pand %xmm1, %xmm2 ; X86-SSE2-NEXT: pand %xmm0, %xmm2 ; X86-SSE2-NEXT: pmovmskb %xmm2, %eax @@ -3309,9 +3309,9 @@ ; X86-SSE41-NEXT: movdqu (%eax), %xmm0 ; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1 ; X86-SSE41-NEXT: movdqu 32(%eax), %xmm2 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm2 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm2 ; X86-SSE41-NEXT: por %xmm1, %xmm2 ; X86-SSE41-NEXT: por %xmm0, %xmm2 ; X86-SSE41-NEXT: ptest %xmm2, %xmm2 @@ -3673,12 +3673,12 @@ ; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1 ; X86-SSE2-NEXT: movdqu 32(%eax), %xmm2 ; X86-SSE2-NEXT: movdqu 47(%eax), %xmm3 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm3 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm2 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm3 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm2 ; X86-SSE2-NEXT: pand %xmm3, %xmm2 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: pand %xmm2, %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF @@ -3692,12 +3692,12 @@ ; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1 ; X86-SSE41-NEXT: movdqu 32(%eax), %xmm2 ; X86-SSE41-NEXT: movdqu 47(%eax), %xmm3 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm3 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm2 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm3 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm2 ; X86-SSE41-NEXT: por %xmm3, %xmm2 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE41-NEXT: por %xmm2, %xmm1 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE41-NEXT: por %xmm1, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: sete %al @@ -4079,12 +4079,12 @@ ; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1 ; X86-SSE2-NEXT: movdqu 32(%eax), %xmm2 ; X86-SSE2-NEXT: movdqu 48(%eax), %xmm3 -; 
X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm3 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm2 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm3 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm2 ; X86-SSE2-NEXT: pand %xmm3, %xmm2 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: pand %xmm2, %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF @@ -4098,12 +4098,12 @@ ; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1 ; X86-SSE41-NEXT: movdqu 32(%eax), %xmm2 ; X86-SSE41-NEXT: movdqu 48(%eax), %xmm3 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm3 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm2 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm3 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm2 ; X86-SSE41-NEXT: por %xmm3, %xmm2 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE41-NEXT: por %xmm2, %xmm1 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE41-NEXT: por %xmm1, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: sete %al diff --git a/llvm/test/CodeGen/X86/memcmp-optsize.ll b/llvm/test/CodeGen/X86/memcmp-optsize.ll --- a/llvm/test/CodeGen/X86/memcmp-optsize.ll +++ b/llvm/test/CodeGen/X86/memcmp-optsize.ll @@ -590,7 +590,7 @@ ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movdqu (%eax), %xmm0 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: sete %al @@ -715,8 +715,8 @@ ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu 8(%eax), %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF @@ -854,8 +854,8 @@ ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF diff --git a/llvm/test/CodeGen/X86/memcmp-pgso.ll b/llvm/test/CodeGen/X86/memcmp-pgso.ll --- a/llvm/test/CodeGen/X86/memcmp-pgso.ll +++ b/llvm/test/CodeGen/X86/memcmp-pgso.ll @@ -590,7 +590,7 @@ ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movdqu (%eax), %xmm0 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: sete %al @@ -715,8 +715,8 @@ ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu 8(%eax), %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: 
pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF @@ -854,8 +854,8 @@ ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF diff --git a/llvm/test/CodeGen/X86/memcmp.ll b/llvm/test/CodeGen/X86/memcmp.ll --- a/llvm/test/CodeGen/X86/memcmp.ll +++ b/llvm/test/CodeGen/X86/memcmp.ll @@ -1485,7 +1485,7 @@ ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movdqu (%eax), %xmm0 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X86-SSE2-NEXT: sete %al @@ -1495,7 +1495,7 @@ ; X86-SSE41: # %bb.0: ; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE41-NEXT: movdqu (%eax), %xmm0 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: sete %al ; X86-SSE41-NEXT: retl @@ -1756,8 +1756,8 @@ ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu 8(%eax), %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF @@ -1769,8 +1769,8 @@ ; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE41-NEXT: movdqu (%eax), %xmm0 ; X86-SSE41-NEXT: movdqu 8(%eax), %xmm1 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE41-NEXT: por %xmm1, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: setne %al @@ -2152,8 +2152,8 @@ ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu 15(%eax), %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF @@ -2165,8 +2165,8 @@ ; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE41-NEXT: movdqu (%eax), %xmm0 ; X86-SSE41-NEXT: movdqu 15(%eax), %xmm1 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE41-NEXT: por %xmm1, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: setne %al @@ -2563,8 +2563,8 @@ ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE2-NEXT: movdqu (%eax), %xmm0 ; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1 -; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 -; X86-SSE2-NEXT: 
pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pand %xmm1, %xmm0 ; X86-SSE2-NEXT: pmovmskb %xmm0, %eax ; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF @@ -2576,8 +2576,8 @@ ; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE41-NEXT: movdqu (%eax), %xmm0 ; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 -; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 +; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE41-NEXT: por %xmm1, %xmm0 ; X86-SSE41-NEXT: ptest %xmm0, %xmm0 ; X86-SSE41-NEXT: setne %al diff --git a/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll b/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll --- a/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll +++ b/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll @@ -501,7 +501,7 @@ ; X86-AVX: # %bb.0: ; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-AVX-NEXT: vmovups (%eax), %ymm0 -; X86-AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-AVX-NEXT: retl %ptr0 = getelementptr inbounds i16, i16* %ptr, i64 0 %ptr3 = getelementptr inbounds i16, i16* %ptr, i64 3 diff --git a/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll b/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll --- a/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll +++ b/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll @@ -138,7 +138,7 @@ ; X86-AVX512F: # %bb.0: ; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-AVX512F-NEXT: vmovdqu64 8(%eax), %zmm0 -; X86-AVX512F-NEXT: vpandq {{\.LCPI.*}}, %zmm0, %zmm0 +; X86-AVX512F-NEXT: vpandq {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 ; X86-AVX512F-NEXT: retl %ptr0 = getelementptr inbounds double, double* %ptr, i64 1 %ptr2 = getelementptr inbounds double, double* %ptr, i64 3 @@ -217,7 +217,7 @@ ; X86-AVX512F: # %bb.0: ; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-AVX512F-NEXT: vmovdqu64 8(%eax), %zmm0 -; X86-AVX512F-NEXT: vpandd {{\.LCPI.*}}, %zmm0, %zmm0 +; X86-AVX512F-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 ; X86-AVX512F-NEXT: retl %ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1 %ptr2 = getelementptr inbounds i64, i64* %ptr, i64 3 @@ -436,7 +436,7 @@ ; X86-AVX512F: # %bb.0: ; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-AVX512F-NEXT: vmovdqu64 (%eax), %zmm0 -; X86-AVX512F-NEXT: vpandd {{\.LCPI.*}}, %zmm0, %zmm0 +; X86-AVX512F-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 ; X86-AVX512F-NEXT: retl %ptr0 = getelementptr inbounds i32, i32* %ptr, i64 0 %ptr3 = getelementptr inbounds i32, i32* %ptr, i64 3 diff --git a/llvm/test/CodeGen/X86/mmx-arith.ll b/llvm/test/CodeGen/X86/mmx-arith.ll --- a/llvm/test/CodeGen/X86/mmx-arith.ll +++ b/llvm/test/CodeGen/X86/mmx-arith.ll @@ -33,7 +33,7 @@ ; X32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; X32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; X32-NEXT: pmullw %xmm0, %xmm1 -; X32-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X32-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X32-NEXT: packuswb %xmm1, %xmm1 ; X32-NEXT: movq %xmm1, (%eax) ; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero @@ -658,7 +658,7 @@ ; X32-NEXT: .cfi_def_cfa_register %ebp ; X32-NEXT: andl $-8, %esp ; X32-NEXT: subl $8, %esp -; X32-NEXT: movq {{\.LCPI.*}}, %mm0 # mm0 = 0x7AAAAAAA7AAAAAAA +; X32-NEXT: movq {{\.LCPI[0-9]+_[0-9]+}}, %mm0 # mm0 = 0x7AAAAAAA7AAAAAAA ; 
X32-NEXT: psrad $255, %mm0 ; X32-NEXT: movq %mm0, (%esp) ; X32-NEXT: movl (%esp), %eax diff --git a/llvm/test/CodeGen/X86/mmx-fold-zero.ll b/llvm/test/CodeGen/X86/mmx-fold-zero.ll --- a/llvm/test/CodeGen/X86/mmx-fold-zero.ll +++ b/llvm/test/CodeGen/X86/mmx-fold-zero.ll @@ -32,7 +32,7 @@ ; X86-NEXT: paddw %mm2, %mm0 ; X86-NEXT: paddw %mm6, %mm0 ; X86-NEXT: pmuludq %mm3, %mm0 -; X86-NEXT: paddw {{\.LCPI.*}}, %mm0 +; X86-NEXT: paddw {{\.LCPI[0-9]+_[0-9]+}}, %mm0 ; X86-NEXT: paddw %mm1, %mm0 ; X86-NEXT: pmuludq %mm7, %mm0 ; X86-NEXT: pmuludq (%esp), %mm0 # 8-byte Folded Reload @@ -70,7 +70,7 @@ ; X64-NEXT: paddw %mm2, %mm0 ; X64-NEXT: paddw %mm6, %mm0 ; X64-NEXT: pmuludq %mm3, %mm0 -; X64-NEXT: paddw {{\.LCPI.*}}, %mm0 +; X64-NEXT: paddw {{\.LCPI[0-9]+_[0-9]+}}, %mm0 ; X64-NEXT: paddw %mm1, %mm0 ; X64-NEXT: pmuludq %mm7, %mm0 ; X64-NEXT: pmuludq {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload diff --git a/llvm/test/CodeGen/X86/neg_fp.ll b/llvm/test/CodeGen/X86/neg_fp.ll --- a/llvm/test/CodeGen/X86/neg_fp.ll +++ b/llvm/test/CodeGen/X86/neg_fp.ll @@ -10,7 +10,7 @@ ; CHECK-NEXT: pushl %eax ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; CHECK-NEXT: subss {{[0-9]+}}(%esp), %xmm0 -; CHECK-NEXT: xorps {{\.LCPI.*}}, %xmm0 +; CHECK-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; CHECK-NEXT: movss %xmm0, (%esp) ; CHECK-NEXT: flds (%esp) ; CHECK-NEXT: popl %eax diff --git a/llvm/test/CodeGen/X86/nontemporal.ll b/llvm/test/CodeGen/X86/nontemporal.ll --- a/llvm/test/CodeGen/X86/nontemporal.ll +++ b/llvm/test/CodeGen/X86/nontemporal.ll @@ -20,21 +20,21 @@ ; X86-SSE-NEXT: movl 8(%ebp), %esi ; X86-SSE-NEXT: movl 80(%ebp), %edx ; X86-SSE-NEXT: movl (%edx), %eax -; X86-SSE-NEXT: addps {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: addps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movntps %xmm0, (%esi) -; X86-SSE-NEXT: paddq {{\.LCPI.*}}, %xmm2 +; X86-SSE-NEXT: paddq {{\.LCPI[0-9]+_[0-9]+}}, %xmm2 ; X86-SSE-NEXT: addl (%edx), %eax ; X86-SSE-NEXT: movntdq %xmm2, (%esi) -; X86-SSE-NEXT: addpd {{\.LCPI.*}}, %xmm1 +; X86-SSE-NEXT: addpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE-NEXT: addl (%edx), %eax ; X86-SSE-NEXT: movntpd %xmm1, (%esi) -; X86-SSE-NEXT: paddd {{\.LCPI.*}}, %xmm6 +; X86-SSE-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm6 ; X86-SSE-NEXT: addl (%edx), %eax ; X86-SSE-NEXT: movntdq %xmm6, (%esi) -; X86-SSE-NEXT: paddw {{\.LCPI.*}}, %xmm5 +; X86-SSE-NEXT: paddw {{\.LCPI[0-9]+_[0-9]+}}, %xmm5 ; X86-SSE-NEXT: addl (%edx), %eax ; X86-SSE-NEXT: movntdq %xmm5, (%esi) -; X86-SSE-NEXT: paddb {{\.LCPI.*}}, %xmm4 +; X86-SSE-NEXT: paddb {{\.LCPI[0-9]+_[0-9]+}}, %xmm4 ; X86-SSE-NEXT: addl (%edx), %eax ; X86-SSE-NEXT: movntdq %xmm4, (%esi) ; X86-SSE-NEXT: addl (%edx), %eax @@ -62,21 +62,21 @@ ; X86-AVX-NEXT: movl 8(%ebp), %edx ; X86-AVX-NEXT: movl 80(%ebp), %esi ; X86-AVX-NEXT: movl (%esi), %eax -; X86-AVX-NEXT: vaddps {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vaddps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vmovntps %xmm0, (%edx) -; X86-AVX-NEXT: vpaddq {{\.LCPI.*}}, %xmm2, %xmm0 +; X86-AVX-NEXT: vpaddq {{\.LCPI[0-9]+_[0-9]+}}, %xmm2, %xmm0 ; X86-AVX-NEXT: addl (%esi), %eax ; X86-AVX-NEXT: vmovntdq %xmm0, (%edx) -; X86-AVX-NEXT: vaddpd {{\.LCPI.*}}, %xmm1, %xmm0 +; X86-AVX-NEXT: vaddpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm0 ; X86-AVX-NEXT: addl (%esi), %eax ; X86-AVX-NEXT: vmovntpd %xmm0, (%edx) -; X86-AVX-NEXT: vpaddd {{\.LCPI.*}}, %xmm6, %xmm0 +; X86-AVX-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm6, %xmm0 ; X86-AVX-NEXT: addl (%esi), %eax ; X86-AVX-NEXT: vmovntdq %xmm0, (%edx) -; X86-AVX-NEXT: 
vpaddw {{\.LCPI.*}}, %xmm5, %xmm0 +; X86-AVX-NEXT: vpaddw {{\.LCPI[0-9]+_[0-9]+}}, %xmm5, %xmm0 ; X86-AVX-NEXT: addl (%esi), %eax ; X86-AVX-NEXT: vmovntdq %xmm0, (%edx) -; X86-AVX-NEXT: vpaddb {{\.LCPI.*}}, %xmm4, %xmm0 +; X86-AVX-NEXT: vpaddb {{\.LCPI[0-9]+_[0-9]+}}, %xmm4, %xmm0 ; X86-AVX-NEXT: addl (%esi), %eax ; X86-AVX-NEXT: vmovntdq %xmm0, (%edx) ; X86-AVX-NEXT: addl (%esi), %eax diff --git a/llvm/test/CodeGen/X86/packss.ll b/llvm/test/CodeGen/X86/packss.ll --- a/llvm/test/CodeGen/X86/packss.ll +++ b/llvm/test/CodeGen/X86/packss.ll @@ -121,14 +121,14 @@ ; X86-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: psrad $31, %xmm0 -; X86-SSE-NEXT: pcmpgtd {{\.LCPI.*}}, %xmm1 +; X86-SSE-NEXT: pcmpgtd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE-NEXT: packssdw %xmm1, %xmm0 ; X86-SSE-NEXT: retl ; ; X86-AVX-LABEL: trunc_ashr_v4i32_icmp_v4i32: ; X86-AVX: # %bb.0: ; X86-AVX-NEXT: vpsrad $31, %xmm0, %xmm0 -; X86-AVX-NEXT: vpcmpgtd {{\.LCPI.*}}, %xmm1, %xmm1 +; X86-AVX-NEXT: vpcmpgtd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 ; X86-AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; diff --git a/llvm/test/CodeGen/X86/peep-test-1.ll b/llvm/test/CodeGen/X86/peep-test-1.ll --- a/llvm/test/CodeGen/X86/peep-test-1.ll +++ b/llvm/test/CodeGen/X86/peep-test-1.ll @@ -10,7 +10,7 @@ ; CHECK-NEXT: .LBB0_1: # %bb ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: fldl (%eax,%ecx,8) -; CHECK-NEXT: fmull {{\.LCPI.*}} +; CHECK-NEXT: fmull {{\.LCPI[0-9]+_[0-9]+}} ; CHECK-NEXT: fstpl (%eax,%ecx,8) ; CHECK-NEXT: decl %ecx ; CHECK-NEXT: js .LBB0_1 diff --git a/llvm/test/CodeGen/X86/pointer-vector.ll b/llvm/test/CodeGen/X86/pointer-vector.ll --- a/llvm/test/CodeGen/X86/pointer-vector.ll +++ b/llvm/test/CodeGen/X86/pointer-vector.ll @@ -133,7 +133,7 @@ ; CHECK-NEXT: movdqa (%ecx), %xmm0 ; CHECK-NEXT: pcmpgtd (%eax), %xmm0 ; CHECK-NEXT: movaps {{.*#+}} xmm1 = [9,8,7,6] -; CHECK-NEXT: blendvps %xmm0, {{\.LCPI.*}}, %xmm1 +; CHECK-NEXT: blendvps %xmm0, {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; CHECK-NEXT: movaps %xmm1, %xmm0 ; CHECK-NEXT: retl entry: @@ -152,7 +152,7 @@ ; CHECK-NEXT: movdqa (%ecx), %xmm0 ; CHECK-NEXT: pcmpeqd (%eax), %xmm0 ; CHECK-NEXT: movaps {{.*#+}} xmm1 = [9,8,7,6] -; CHECK-NEXT: blendvps %xmm0, {{\.LCPI.*}}, %xmm1 +; CHECK-NEXT: blendvps %xmm0, {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; CHECK-NEXT: movaps %xmm1, %xmm0 ; CHECK-NEXT: retl entry: diff --git a/llvm/test/CodeGen/X86/popcnt.ll b/llvm/test/CodeGen/X86/popcnt.ll --- a/llvm/test/CodeGen/X86/popcnt.ll +++ b/llvm/test/CodeGen/X86/popcnt.ll @@ -253,7 +253,7 @@ ; X86-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE2-NEXT: psrlw $1, %xmm1 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: psubb %xmm1, %xmm0 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] ; X86-SSE2-NEXT: movdqa %xmm0, %xmm2 @@ -264,7 +264,7 @@ ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE2-NEXT: psrlw $4, %xmm1 ; X86-SSE2-NEXT: paddb %xmm0, %xmm1 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: pxor %xmm0, %xmm0 ; X86-SSE2-NEXT: psadbw %xmm1, %xmm0 ; X86-SSE2-NEXT: movd %xmm0, %eax @@ -749,7 +749,7 @@ ; X86-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE2-NEXT: psrlw $1, %xmm1 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: psubb %xmm1, %xmm0 ; 
X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] ; X86-SSE2-NEXT: movdqa %xmm0, %xmm2 @@ -760,7 +760,7 @@ ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE2-NEXT: psrlw $4, %xmm1 ; X86-SSE2-NEXT: paddb %xmm0, %xmm1 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: pxor %xmm0, %xmm0 ; X86-SSE2-NEXT: psadbw %xmm1, %xmm0 ; X86-SSE2-NEXT: movd %xmm0, %eax @@ -1178,7 +1178,7 @@ ; X86-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE2-NEXT: psrlw $1, %xmm1 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: psubb %xmm1, %xmm0 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] ; X86-SSE2-NEXT: movdqa %xmm0, %xmm2 @@ -1189,7 +1189,7 @@ ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE2-NEXT: psrlw $4, %xmm1 ; X86-SSE2-NEXT: paddb %xmm0, %xmm1 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: pxor %xmm0, %xmm0 ; X86-SSE2-NEXT: psadbw %xmm1, %xmm0 ; X86-SSE2-NEXT: movd %xmm0, %eax diff --git a/llvm/test/CodeGen/X86/pr15309.ll b/llvm/test/CodeGen/X86/pr15309.ll --- a/llvm/test/CodeGen/X86/pr15309.ll +++ b/llvm/test/CodeGen/X86/pr15309.ll @@ -19,10 +19,10 @@ ; CHECK-NEXT: movl %edx, {{[0-9]+}}(%esp) ; CHECK-NEXT: shrl $31, %ecx ; CHECK-NEXT: fildll (%esp) -; CHECK-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) +; CHECK-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4) ; CHECK-NEXT: shrl $31, %esi ; CHECK-NEXT: fildll {{[0-9]+}}(%esp) -; CHECK-NEXT: fadds {{\.LCPI.*}}(,%esi,4) +; CHECK-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%esi,4) ; CHECK-NEXT: fstps 84(%eax) ; CHECK-NEXT: fstps 80(%eax) ; CHECK-NEXT: addl $20, %esp diff --git a/llvm/test/CodeGen/X86/pr34080-2.ll b/llvm/test/CodeGen/X86/pr34080-2.ll --- a/llvm/test/CodeGen/X86/pr34080-2.ll +++ b/llvm/test/CodeGen/X86/pr34080-2.ll @@ -48,8 +48,8 @@ ; CHECK-NEXT: leal 257(%ecx,%edx), %eax ; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) ; CHECK-NEXT: fildl {{[0-9]+}}(%esp) -; CHECK-NEXT: fadds {{\.LCPI.*}} -; CHECK-NEXT: fmuls {{\.LCPI.*}} +; CHECK-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}} +; CHECK-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} ; CHECK-NEXT: fnstcw {{[0-9]+}}(%esp) ; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: orl $3072, %eax # imm = 0xC00 @@ -62,7 +62,7 @@ ; CHECK-NEXT: imull $60000, 24(%ebx), %ecx # imm = 0xEA60 ; CHECK-NEXT: addl %eax, %ecx ; CHECK-NEXT: fldl 28(%ebx) -; CHECK-NEXT: fmuls {{\.LCPI.*}} +; CHECK-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}} ; CHECK-NEXT: fnstcw {{[0-9]+}}(%esp) ; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: orl $3072, %eax # imm = 0xC00 diff --git a/llvm/test/CodeGen/X86/pr34605.ll b/llvm/test/CodeGen/X86/pr34605.ll --- a/llvm/test/CodeGen/X86/pr34605.ll +++ b/llvm/test/CodeGen/X86/pr34605.ll @@ -6,18 +6,18 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: vpbroadcastd {{[0-9]+}}(%esp), %zmm0 -; CHECK-NEXT: vpcmpeqd {{\.LCPI.*}}, %zmm0, %k0 -; CHECK-NEXT: vpcmpeqd {{\.LCPI.*}}, %zmm0, %k1 +; CHECK-NEXT: vpcmpeqd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %k0 +; CHECK-NEXT: vpcmpeqd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %k1 ; CHECK-NEXT: kunpckwd %k0, %k1, %k0 -; CHECK-NEXT: vpcmpeqd {{\.LCPI.*}}, %zmm0, %k1 -; CHECK-NEXT: vpcmpeqd {{\.LCPI.*}}, %zmm0, %k2 +; CHECK-NEXT: vpcmpeqd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %k1 +; CHECK-NEXT: vpcmpeqd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %k2 ; CHECK-NEXT: kunpckwd %k1, %k2, %k1 ; 
CHECK-NEXT: kunpckdq %k0, %k1, %k0 ; CHECK-NEXT: movl $1, %ecx ; CHECK-NEXT: kmovd %ecx, %k1 ; CHECK-NEXT: kmovd %k1, %k1 ; CHECK-NEXT: kandq %k1, %k0, %k1 -; CHECK-NEXT: vmovdqu8 {{\.LCPI.*}}, %zmm0 {%k1} {z} +; CHECK-NEXT: vmovdqu8 {{\.LCPI[0-9]+_[0-9]+}}, %zmm0 {%k1} {z} ; CHECK-NEXT: vmovdqu64 %zmm0, (%eax) ; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ; CHECK-NEXT: vmovdqu64 %zmm0, 64(%eax) diff --git a/llvm/test/CodeGen/X86/pr40539.ll b/llvm/test/CodeGen/X86/pr40539.ll --- a/llvm/test/CodeGen/X86/pr40539.ll +++ b/llvm/test/CodeGen/X86/pr40539.ll @@ -41,7 +41,7 @@ ; CHECK-NEXT: .cfi_def_cfa_offset 12 ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero -; CHECK-NEXT: divss {{\.LCPI.*}}, %xmm0 +; CHECK-NEXT: divss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; CHECK-NEXT: movss %xmm0, {{[0-9]+}}(%esp) ; CHECK-NEXT: flds {{[0-9]+}}(%esp) ; CHECK-NEXT: #APP @@ -51,7 +51,7 @@ ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; CHECK-NEXT: ucomiss %xmm0, %xmm1 ; CHECK-NEXT: setae %cl -; CHECK-NEXT: ucomiss {{\.LCPI.*}}, %xmm0 +; CHECK-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; CHECK-NEXT: setae %al ; CHECK-NEXT: andb %cl, %al ; CHECK-NEXT: addl $8, %esp diff --git a/llvm/test/CodeGen/X86/pr40891.ll b/llvm/test/CodeGen/X86/pr40891.ll --- a/llvm/test/CodeGen/X86/pr40891.ll +++ b/llvm/test/CodeGen/X86/pr40891.ll @@ -7,7 +7,7 @@ ; CHECK-LABEL: foo: ; CHECK: # %bb.0: ; CHECK-NEXT: vandps %ymm2, %ymm0, %ymm0 -; CHECK-NEXT: vandps {{\.LCPI.*}}, %ymm1, %ymm1 +; CHECK-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1 ; CHECK-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3] ; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6] diff --git a/llvm/test/CodeGen/X86/pr46527.ll b/llvm/test/CodeGen/X86/pr46527.ll --- a/llvm/test/CodeGen/X86/pr46527.ll +++ b/llvm/test/CodeGen/X86/pr46527.ll @@ -22,7 +22,7 @@ ; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0] ; CHECK-NEXT: paddb %xmm1, %xmm1 ; CHECK-NEXT: pxor %xmm0, %xmm1 -; CHECK-NEXT: pxor {{\.LCPI.*}}@GOTOFF(%eax), %xmm1 +; CHECK-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}@GOTOFF(%eax), %xmm1 ; CHECK-NEXT: movdqa %xmm1, (%ecx) ; CHECK-NEXT: retl entry: diff --git a/llvm/test/CodeGen/X86/pr47299.ll b/llvm/test/CodeGen/X86/pr47299.ll --- a/llvm/test/CodeGen/X86/pr47299.ll +++ b/llvm/test/CodeGen/X86/pr47299.ll @@ -13,7 +13,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: mov rax, rdi ; CHECK-NEXT: vpbroadcastq zmm0, rsi -; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] +; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] ; CHECK-NEXT: kshiftrb k1, k0, 6 ; CHECK-NEXT: kmovd r8d, k1 ; CHECK-NEXT: kshiftrb k1, k0, 5 @@ -57,8 +57,8 @@ ; CHECK-LABEL: create_mask16: ; CHECK: # %bb.0: ; CHECK-NEXT: vpbroadcastq zmm0, rdi -; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] -; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] +; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] +; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] ; CHECK-NEXT: kunpckbw k0, k1, k0 ; CHECK-NEXT: vpmovm2b xmm0, k0 ; CHECK-NEXT: vzeroupper @@ -71,11 +71,11 @@ ; CHECK-LABEL: create_mask32: ; CHECK: # %bb.0: ; CHECK-NEXT: vpbroadcastq zmm0, rdi -; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] -; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] -; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + 
{{\.LCPI.*}}] +; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] +; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] +; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] ; CHECK-NEXT: kunpckbw k0, k1, k0 -; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] +; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] ; CHECK-NEXT: kunpckbw k1, k1, k2 ; CHECK-NEXT: kunpckwd k0, k1, k0 ; CHECK-NEXT: vpmovm2b ymm0, k0 @@ -88,18 +88,18 @@ ; CHECK-LABEL: create_mask64: ; CHECK: # %bb.0: ; CHECK-NEXT: vpbroadcastq zmm0, rdi -; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] -; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] -; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] +; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] +; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] +; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] ; CHECK-NEXT: kunpckbw k0, k1, k0 -; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] +; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] ; CHECK-NEXT: kunpckbw k1, k1, k2 -; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] +; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] ; CHECK-NEXT: kunpckwd k0, k1, k0 -; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] +; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] ; CHECK-NEXT: kunpckbw k1, k1, k2 -; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] -; CHECK-NEXT: vpcmpnleuq k3, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] +; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] +; CHECK-NEXT: vpcmpnleuq k3, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] ; CHECK-NEXT: kunpckbw k2, k3, k2 ; CHECK-NEXT: kunpckwd k1, k2, k1 ; CHECK-NEXT: kunpckdq k0, k1, k0 @@ -113,7 +113,7 @@ ; CHECK-LABEL: create_mask16_i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vpbroadcastd zmm0, edi -; CHECK-NEXT: vpcmpnleud k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] +; CHECK-NEXT: vpcmpnleud k0, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] ; CHECK-NEXT: vpmovm2b xmm0, k0 ; CHECK-NEXT: vzeroupper ; CHECK-NEXT: ret @@ -125,11 +125,11 @@ ; CHECK-LABEL: create_mask64_i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vpbroadcastd zmm0, edi -; CHECK-NEXT: vpcmpnleud k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] -; CHECK-NEXT: vpcmpnleud k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] -; CHECK-NEXT: vpcmpnleud k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] +; CHECK-NEXT: vpcmpnleud k0, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] +; CHECK-NEXT: vpcmpnleud k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] +; CHECK-NEXT: vpcmpnleud k2, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] ; CHECK-NEXT: kunpckwd k0, k1, k0 -; CHECK-NEXT: vpcmpnleud k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}] +; CHECK-NEXT: vpcmpnleud k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}] ; CHECK-NEXT: kunpckwd k1, k1, k2 ; CHECK-NEXT: kunpckdq k0, k1, k0 ; CHECK-NEXT: vpmovm2b zmm0, k0 diff --git a/llvm/test/CodeGen/X86/rotate-extract-vector.ll b/llvm/test/CodeGen/X86/rotate-extract-vector.ll --- a/llvm/test/CodeGen/X86/rotate-extract-vector.ll +++ b/llvm/test/CodeGen/X86/rotate-extract-vector.ll @@ -109,7 +109,7 @@ ; X86-NEXT: vpbroadcastd {{.*#+}} xmm1 = [9,9,9,9] ; X86-NEXT: vpmulld 
%xmm1, %xmm0, %xmm0 ; X86-NEXT: vprold $7, %zmm0, %zmm0 -; X86-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-NEXT: vzeroupper ; X86-NEXT: retl ; @@ -132,8 +132,8 @@ define <32 x i16> @illegal_no_extract_mul(<32 x i16> %i) nounwind { ; X86-LABEL: illegal_no_extract_mul: ; X86: # %bb.0: -; X86-NEXT: vpmullw {{\.LCPI.*}}, %zmm0, %zmm1 -; X86-NEXT: vpmullw {{\.LCPI.*}}, %zmm0, %zmm0 +; X86-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm1 +; X86-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 ; X86-NEXT: vpsrlw $10, %zmm0, %zmm0 ; X86-NEXT: vporq %zmm0, %zmm1, %zmm0 ; X86-NEXT: retl diff --git a/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll b/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll --- a/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll +++ b/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll @@ -267,7 +267,7 @@ ; X87-LIN: # %bb.0: ; X87-LIN-NEXT: subl $20, %esp ; X87-LIN-NEXT: flds {{[0-9]+}}(%esp) -; X87-LIN-NEXT: flds {{\.LCPI.*}} +; X87-LIN-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X87-LIN-NEXT: fucom %st(1) ; X87-LIN-NEXT: fnstsw %ax ; X87-LIN-NEXT: xorl %edx, %edx @@ -691,7 +691,7 @@ ; X87-LIN: # %bb.0: ; X87-LIN-NEXT: subl $20, %esp ; X87-LIN-NEXT: fldl {{[0-9]+}}(%esp) -; X87-LIN-NEXT: flds {{\.LCPI.*}} +; X87-LIN-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X87-LIN-NEXT: fucom %st(1) ; X87-LIN-NEXT: fnstsw %ax ; X87-LIN-NEXT: xorl %edx, %edx @@ -914,7 +914,7 @@ ; X86-AVX512-LIN: # %bb.0: ; X86-AVX512-LIN-NEXT: subl $12, %esp ; X86-AVX512-LIN-NEXT: fldt {{[0-9]+}}(%esp) -; X86-AVX512-LIN-NEXT: flds {{\.LCPI.*}} +; X86-AVX512-LIN-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-AVX512-LIN-NEXT: xorl %edx, %edx ; X86-AVX512-LIN-NEXT: fucomi %st(1), %st ; X86-AVX512-LIN-NEXT: fldz @@ -990,7 +990,7 @@ ; X86-SSE3-LIN: # %bb.0: ; X86-SSE3-LIN-NEXT: subl $12, %esp ; X86-SSE3-LIN-NEXT: fldt {{[0-9]+}}(%esp) -; X86-SSE3-LIN-NEXT: flds {{\.LCPI.*}} +; X86-SSE3-LIN-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE3-LIN-NEXT: xorl %edx, %edx ; X86-SSE3-LIN-NEXT: fucomi %st(1), %st ; X86-SSE3-LIN-NEXT: fldz @@ -1072,7 +1072,7 @@ ; X86-SSE2-LIN: # %bb.0: ; X86-SSE2-LIN-NEXT: subl $20, %esp ; X86-SSE2-LIN-NEXT: fldt {{[0-9]+}}(%esp) -; X86-SSE2-LIN-NEXT: flds {{\.LCPI.*}} +; X86-SSE2-LIN-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X86-SSE2-LIN-NEXT: xorl %edx, %edx ; X86-SSE2-LIN-NEXT: fucomi %st(1), %st ; X86-SSE2-LIN-NEXT: setbe %dl @@ -1180,7 +1180,7 @@ ; X87-LIN: # %bb.0: ; X87-LIN-NEXT: subl $20, %esp ; X87-LIN-NEXT: fldt {{[0-9]+}}(%esp) -; X87-LIN-NEXT: flds {{\.LCPI.*}} +; X87-LIN-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; X87-LIN-NEXT: fucom %st(1) ; X87-LIN-NEXT: fnstsw %ax ; X87-LIN-NEXT: xorl %edx, %edx diff --git a/llvm/test/CodeGen/X86/scalar-int-to-fp.ll b/llvm/test/CodeGen/X86/scalar-int-to-fp.ll --- a/llvm/test/CodeGen/X86/scalar-int-to-fp.ll +++ b/llvm/test/CodeGen/X86/scalar-int-to-fp.ll @@ -33,8 +33,8 @@ ; SSE2_32: # %bb.0: ; SSE2_32-NEXT: pushl %eax ; SSE2_32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero -; SSE2_32-NEXT: orpd {{\.LCPI.*}}, %xmm0 -; SSE2_32-NEXT: subsd {{\.LCPI.*}}, %xmm0 +; SSE2_32-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 +; SSE2_32-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; SSE2_32-NEXT: cvtsd2ss %xmm0, %xmm0 ; SSE2_32-NEXT: movss %xmm0, (%esp) ; SSE2_32-NEXT: flds (%esp) @@ -147,8 +147,8 @@ ; SSE2_32-NEXT: andl $-8, %esp ; SSE2_32-NEXT: subl $8, %esp ; SSE2_32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero -; SSE2_32-NEXT: orpd {{\.LCPI.*}}, %xmm0 -; SSE2_32-NEXT: subsd {{\.LCPI.*}}, %xmm0 +; SSE2_32-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 +; 
SSE2_32-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; SSE2_32-NEXT: movsd %xmm0, (%esp) ; SSE2_32-NEXT: fldl (%esp) ; SSE2_32-NEXT: movl %ebp, %esp @@ -333,7 +333,7 @@ ; AVX512F_32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp) ; AVX512F_32-NEXT: shrl $31, %eax ; AVX512F_32-NEXT: fildll {{[0-9]+}}(%esp) -; AVX512F_32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) +; AVX512F_32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4) ; AVX512F_32-NEXT: fstps {{[0-9]+}}(%esp) ; AVX512F_32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; AVX512F_32-NEXT: vmovss %xmm0, (%esp) @@ -353,7 +353,7 @@ ; SSE2_32-NEXT: movlps %xmm0, {{[0-9]+}}(%esp) ; SSE2_32-NEXT: shrl $31, %eax ; SSE2_32-NEXT: fildll {{[0-9]+}}(%esp) -; SSE2_32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) +; SSE2_32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4) ; SSE2_32-NEXT: fstps {{[0-9]+}}(%esp) ; SSE2_32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE2_32-NEXT: movss %xmm0, (%esp) @@ -392,7 +392,7 @@ ; SSE1_32-NEXT: fstpl {{[0-9]+}}(%esp) ; SSE1_32-NEXT: shrl $31, %ecx ; SSE1_32-NEXT: fildll {{[0-9]+}}(%esp) -; SSE1_32-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) +; SSE1_32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4) ; SSE1_32-NEXT: fstps {{[0-9]+}}(%esp) ; SSE1_32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; SSE1_32-NEXT: movss %xmm0, (%esp) @@ -413,7 +413,7 @@ ; X87-NEXT: movl %eax, {{[0-9]+}}(%esp) ; X87-NEXT: shrl $31, %ecx ; X87-NEXT: fildll {{[0-9]+}}(%esp) -; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) +; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4) ; X87-NEXT: fstps {{[0-9]+}}(%esp) ; X87-NEXT: flds {{[0-9]+}}(%esp) ; X87-NEXT: movl %ebp, %esp @@ -652,7 +652,7 @@ ; AVX512F_32-NEXT: subl $8, %esp ; AVX512F_32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; AVX512F_32-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] -; AVX512F_32-NEXT: vsubpd {{\.LCPI.*}}, %xmm0, %xmm0 +; AVX512F_32-NEXT: vsubpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; AVX512F_32-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; AVX512F_32-NEXT: vaddsd %xmm0, %xmm1, %xmm0 ; AVX512F_32-NEXT: vmovsd %xmm0, (%esp) @@ -669,7 +669,7 @@ ; SSE2_32-NEXT: subl $8, %esp ; SSE2_32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; SSE2_32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] -; SSE2_32-NEXT: subpd {{\.LCPI.*}}, %xmm0 +; SSE2_32-NEXT: subpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; SSE2_32-NEXT: movapd %xmm0, %xmm1 ; SSE2_32-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] ; SSE2_32-NEXT: addsd %xmm0, %xmm1 @@ -701,7 +701,7 @@ ; SSE1_32-NEXT: movl %eax, (%esp) ; SSE1_32-NEXT: shrl $31, %ecx ; SSE1_32-NEXT: fildll (%esp) -; SSE1_32-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) +; SSE1_32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4) ; SSE1_32-NEXT: fstpl {{[0-9]+}}(%esp) ; SSE1_32-NEXT: fldl {{[0-9]+}}(%esp) ; SSE1_32-NEXT: movl %ebp, %esp @@ -720,7 +720,7 @@ ; X87-NEXT: movl %eax, (%esp) ; X87-NEXT: shrl $31, %ecx ; X87-NEXT: fildll (%esp) -; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) +; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4) ; X87-NEXT: fstpl {{[0-9]+}}(%esp) ; X87-NEXT: fldl {{[0-9]+}}(%esp) ; X87-NEXT: movl %ebp, %esp @@ -774,7 +774,7 @@ ; AVX512F_32-NEXT: subl $8, %esp ; AVX512F_32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero ; AVX512F_32-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] -; AVX512F_32-NEXT: vsubpd {{\.LCPI.*}}, %xmm0, %xmm0 +; AVX512F_32-NEXT: vsubpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; AVX512F_32-NEXT: vhaddpd %xmm0, %xmm0, %xmm0 ; AVX512F_32-NEXT: vmovlpd %xmm0, (%esp) ; AVX512F_32-NEXT: fldl (%esp) @@ -790,7 +790,7 @@ ; SSE2_32-NEXT: subl $8, %esp ; SSE2_32-NEXT: movsd 
{{.*#+}} xmm0 = mem[0],zero ; SSE2_32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] -; SSE2_32-NEXT: subpd {{\.LCPI.*}}, %xmm0 +; SSE2_32-NEXT: subpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; SSE2_32-NEXT: movapd %xmm0, %xmm1 ; SSE2_32-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] ; SSE2_32-NEXT: addsd %xmm0, %xmm1 @@ -822,7 +822,7 @@ ; SSE1_32-NEXT: movl %eax, (%esp) ; SSE1_32-NEXT: shrl $31, %ecx ; SSE1_32-NEXT: fildll (%esp) -; SSE1_32-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) +; SSE1_32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4) ; SSE1_32-NEXT: fstpl {{[0-9]+}}(%esp) ; SSE1_32-NEXT: fldl {{[0-9]+}}(%esp) ; SSE1_32-NEXT: movl %ebp, %esp @@ -841,7 +841,7 @@ ; X87-NEXT: movl %eax, (%esp) ; X87-NEXT: shrl $31, %ecx ; X87-NEXT: fildll (%esp) -; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) +; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4) ; X87-NEXT: fstpl {{[0-9]+}}(%esp) ; X87-NEXT: fldl {{[0-9]+}}(%esp) ; X87-NEXT: movl %ebp, %esp @@ -1076,7 +1076,7 @@ ; CHECK32-NEXT: movl %eax, (%esp) ; CHECK32-NEXT: shrl $31, %ecx ; CHECK32-NEXT: fildll (%esp) -; CHECK32-NEXT: fadds {{\.LCPI.*}}(,%ecx,4) +; CHECK32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4) ; CHECK32-NEXT: movl %ebp, %esp ; CHECK32-NEXT: popl %ebp ; CHECK32-NEXT: retl @@ -1088,7 +1088,7 @@ ; CHECK64-NEXT: testq %rdi, %rdi ; CHECK64-NEXT: sets %al ; CHECK64-NEXT: fildll -{{[0-9]+}}(%rsp) -; CHECK64-NEXT: fadds {{\.LCPI.*}}(,%rax,4) +; CHECK64-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%rax,4) ; CHECK64-NEXT: retq %r = uitofp i64 %a to x86_fp80 ret x86_fp80 %r diff --git a/llvm/test/CodeGen/X86/select-of-fp-constants.ll b/llvm/test/CodeGen/X86/select-of-fp-constants.ll --- a/llvm/test/CodeGen/X86/select-of-fp-constants.ll +++ b/llvm/test/CodeGen/X86/select-of-fp-constants.ll @@ -16,7 +16,7 @@ ; X86-NEXT: xorl %eax, %eax ; X86-NEXT: cmpl $0, {{[0-9]+}}(%esp) ; X86-NEXT: sete %al -; X86-NEXT: flds {{\.LCPI.*}}(,%eax,4) +; X86-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4) ; X86-NEXT: retl ; ; X64-SSE-LABEL: icmp_select_fp_constants: @@ -46,7 +46,7 @@ ; X86-SSE-NEXT: cmpneqss {{[0-9]+}}(%esp), %xmm0 ; X86-SSE-NEXT: movd %xmm0, %eax ; X86-SSE-NEXT: andl $1, %eax -; X86-SSE-NEXT: flds {{\.LCPI.*}}(,%eax,4) +; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4) ; X86-SSE-NEXT: retl ; ; X86-AVX2-LABEL: fcmp_select_fp_constants: @@ -55,15 +55,15 @@ ; X86-AVX2-NEXT: vcmpneqss {{[0-9]+}}(%esp), %xmm0, %xmm0 ; X86-AVX2-NEXT: vmovd %xmm0, %eax ; X86-AVX2-NEXT: andl $1, %eax -; X86-AVX2-NEXT: flds {{\.LCPI.*}}(,%eax,4) +; X86-AVX2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4) ; X86-AVX2-NEXT: retl ; ; X86-AVX512F-LABEL: fcmp_select_fp_constants: ; X86-AVX512F: # %bb.0: ; X86-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero -; X86-AVX512F-NEXT: vcmpneqss {{\.LCPI.*}}, %xmm0, %k0 +; X86-AVX512F-NEXT: vcmpneqss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %k0 ; X86-AVX512F-NEXT: kmovw %k0, %eax -; X86-AVX512F-NEXT: flds {{\.LCPI.*}}(,%eax,4) +; X86-AVX512F-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4) ; X86-AVX512F-NEXT: retl ; ; X64-SSE-LABEL: fcmp_select_fp_constants: diff --git a/llvm/test/CodeGen/X86/select.ll b/llvm/test/CodeGen/X86/select.ll --- a/llvm/test/CodeGen/X86/select.ll +++ b/llvm/test/CodeGen/X86/select.ll @@ -163,7 +163,7 @@ ; MCU-NEXT: xorl %ecx, %ecx ; MCU-NEXT: testl %eax, %eax ; MCU-NEXT: sete %cl -; MCU-NEXT: flds {{\.LCPI.*}}(,%ecx,4) +; MCU-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4) ; MCU-NEXT: retl entry: %0 = icmp eq i32 %x, 0 @@ -197,7 +197,7 @@ ; MCU: # %bb.0: # %entry ; MCU-NEXT: movl %eax, %ecx ; MCU-NEXT: fldl {{[0-9]+}}(%esp) -; MCU-NEXT: 
flds {{\.LCPI.*}} +; MCU-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; MCU-NEXT: fucompp ; MCU-NEXT: fnstsw %ax ; MCU-NEXT: xorl %edx, %edx @@ -422,7 +422,7 @@ ; MCU-NEXT: notl %eax ; MCU-NEXT: shrl $27, %eax ; MCU-NEXT: andl $-16, %eax -; MCU-NEXT: fldt {{\.LCPI.*}}(%eax) +; MCU-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}(%eax) ; MCU-NEXT: retl %tmp9 = icmp sgt i32 %tmp8, -1 %retval = select i1 %tmp9, x86_fp80 0xK4005B400000000000000, x86_fp80 0xK40078700000000000000 diff --git a/llvm/test/CodeGen/X86/setcc-lowering.ll b/llvm/test/CodeGen/X86/setcc-lowering.ll --- a/llvm/test/CodeGen/X86/setcc-lowering.ll +++ b/llvm/test/CodeGen/X86/setcc-lowering.ll @@ -22,7 +22,7 @@ ; KNL-32-LABEL: pr25080: ; KNL-32: # %bb.0: # %entry ; KNL-32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 -; KNL-32-NEXT: vptestnmd {{\.LCPI.*}}{1to16}, %zmm0, %k0 +; KNL-32-NEXT: vptestnmd {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %k0 ; KNL-32-NEXT: movb $15, %al ; KNL-32-NEXT: kmovw %eax, %k1 ; KNL-32-NEXT: korw %k1, %k0, %k1 diff --git a/llvm/test/CodeGen/X86/shrink-fp-const2.ll b/llvm/test/CodeGen/X86/shrink-fp-const2.ll --- a/llvm/test/CodeGen/X86/shrink-fp-const2.ll +++ b/llvm/test/CodeGen/X86/shrink-fp-const2.ll @@ -4,7 +4,7 @@ define x86_fp80 @test2() nounwind { ; CHECK-LABEL: test2: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: flds {{\.LCPI.*}} +; CHECK-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}} ; CHECK-NEXT: retl entry: ret x86_fp80 0xK3FFFC000000000000000 diff --git a/llvm/test/CodeGen/X86/shrink_vmul.ll b/llvm/test/CodeGen/X86/shrink_vmul.ll --- a/llvm/test/CodeGen/X86/shrink_vmul.ll +++ b/llvm/test/CodeGen/X86/shrink_vmul.ll @@ -1406,7 +1406,7 @@ ; X86-SSE-NEXT: pxor %xmm1, %xmm1 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; X86-SSE-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4) ; X86-SSE-NEXT: retl ; @@ -1418,7 +1418,7 @@ ; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx ; X86-AVX-NEXT: vmovd %ecx, %xmm0 ; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; X86-AVX-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: retl ; @@ -1471,7 +1471,7 @@ ; X86-SSE-NEXT: movd %ecx, %xmm0 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; X86-SSE-NEXT: psraw $8, %xmm0 -; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7] ; X86-SSE-NEXT: psrad $16, %xmm0 ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4) @@ -1485,7 +1485,7 @@ ; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx ; X86-AVX-NEXT: vmovd %ecx, %xmm0 ; X86-AVX-NEXT: vpmovsxbd %xmm0, %xmm0 -; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: retl ; @@ -1540,7 +1540,7 @@ ; X86-SSE-NEXT: pxor %xmm1, %xmm1 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = 
xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] -; X86-SSE-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4) ; X86-SSE-NEXT: retl ; @@ -1552,7 +1552,7 @@ ; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx ; X86-AVX-NEXT: vmovd %ecx, %xmm0 ; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; X86-AVX-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: retl ; @@ -1621,7 +1621,7 @@ ; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx ; X86-AVX-NEXT: vmovd %ecx, %xmm0 ; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: retl ; @@ -1693,7 +1693,7 @@ ; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx ; X86-AVX-NEXT: vmovd %ecx, %xmm0 ; X86-AVX-NEXT: vpmovsxbd %xmm0, %xmm0 -; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: retl ; @@ -1765,7 +1765,7 @@ ; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx ; X86-AVX-NEXT: vmovd %ecx, %xmm0 ; X86-AVX-NEXT: vpmovsxbd %xmm0, %xmm0 -; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: retl ; @@ -1833,7 +1833,7 @@ ; X86-AVX-NEXT: movl c, %edx ; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: retl ; @@ -1897,7 +1897,7 @@ ; X86-AVX-NEXT: movl c, %edx ; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-AVX-NEXT: vpmovsxwd %xmm0, %xmm0 -; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: retl ; @@ -1947,7 +1947,7 @@ ; X86-SSE-NEXT: movl c, %edx ; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE-NEXT: psrld $16, %xmm0 -; X86-SSE-NEXT: pmuludq {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmuludq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: psllq $32, %xmm0 ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4) ; X86-SSE-NEXT: retl @@ -1959,7 +1959,7 @@ ; X86-AVX-NEXT: movl c, %edx ; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: retl ; @@ -2009,7 +2009,7 @@ ; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; X86-SSE-NEXT: psrad $16, %xmm0 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] -; X86-SSE-NEXT: pmuludq {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmuludq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: psllq $32, %xmm0 ; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4) ; X86-SSE-NEXT: retl @@ -2021,7 +2021,7 @@ ; X86-AVX-NEXT: movl c, %edx ; 
X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-AVX-NEXT: vpmovsxwd %xmm0, %xmm0 -; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4) ; X86-AVX-NEXT: retl ; diff --git a/llvm/test/CodeGen/X86/sink-addsub-of-const.ll b/llvm/test/CodeGen/X86/sink-addsub-of-const.ll --- a/llvm/test/CodeGen/X86/sink-addsub-of-const.ll +++ b/llvm/test/CodeGen/X86/sink-addsub-of-const.ll @@ -261,7 +261,7 @@ ; X32-LABEL: vec_sink_add_of_const_to_add0: ; X32: # %bb.0: ; X32-NEXT: paddd %xmm1, %xmm0 -; X32-NEXT: paddd {{\.LCPI.*}}, %xmm0 +; X32-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X32-NEXT: retl ; ; X64-LABEL: vec_sink_add_of_const_to_add0: @@ -277,7 +277,7 @@ ; X32-LABEL: vec_sink_add_of_const_to_add1: ; X32: # %bb.0: ; X32-NEXT: paddd %xmm1, %xmm0 -; X32-NEXT: paddd {{\.LCPI.*}}, %xmm0 +; X32-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X32-NEXT: retl ; ; X64-LABEL: vec_sink_add_of_const_to_add1: @@ -297,7 +297,7 @@ ; X32-LABEL: vec_sink_sub_of_const_to_add0: ; X32: # %bb.0: ; X32-NEXT: paddd %xmm1, %xmm0 -; X32-NEXT: psubd {{\.LCPI.*}}, %xmm0 +; X32-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X32-NEXT: retl ; ; X64-LABEL: vec_sink_sub_of_const_to_add0: @@ -313,7 +313,7 @@ ; X32-LABEL: vec_sink_sub_of_const_to_add1: ; X32: # %bb.0: ; X32-NEXT: paddd %xmm1, %xmm0 -; X32-NEXT: psubd {{\.LCPI.*}}, %xmm0 +; X32-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X32-NEXT: retl ; ; X64-LABEL: vec_sink_sub_of_const_to_add1: @@ -333,7 +333,7 @@ ; X32-LABEL: vec_sink_sub_from_const_to_add0: ; X32: # %bb.0: ; X32-NEXT: psubd %xmm0, %xmm1 -; X32-NEXT: paddd {{\.LCPI.*}}, %xmm1 +; X32-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X32-NEXT: movdqa %xmm1, %xmm0 ; X32-NEXT: retl ; @@ -351,7 +351,7 @@ ; X32-LABEL: vec_sink_sub_from_const_to_add1: ; X32: # %bb.0: ; X32-NEXT: psubd %xmm0, %xmm1 -; X32-NEXT: paddd {{\.LCPI.*}}, %xmm1 +; X32-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X32-NEXT: movdqa %xmm1, %xmm0 ; X32-NEXT: retl ; @@ -373,7 +373,7 @@ ; X32-LABEL: vec_sink_add_of_const_to_sub: ; X32: # %bb.0: ; X32-NEXT: psubd %xmm1, %xmm0 -; X32-NEXT: paddd {{\.LCPI.*}}, %xmm0 +; X32-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X32-NEXT: retl ; ; X64-LABEL: vec_sink_add_of_const_to_sub: @@ -389,7 +389,7 @@ ; X32-LABEL: vec_sink_add_of_const_to_sub2: ; X32: # %bb.0: ; X32-NEXT: psubd %xmm0, %xmm1 -; X32-NEXT: psubd {{\.LCPI.*}}, %xmm1 +; X32-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X32-NEXT: movdqa %xmm1, %xmm0 ; X32-NEXT: retl ; @@ -411,7 +411,7 @@ ; X32-LABEL: vec_sink_sub_of_const_to_sub: ; X32: # %bb.0: ; X32-NEXT: psubd %xmm1, %xmm0 -; X32-NEXT: psubd {{\.LCPI.*}}, %xmm0 +; X32-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X32-NEXT: retl ; ; X64-LABEL: vec_sink_sub_of_const_to_sub: @@ -427,7 +427,7 @@ ; X32-LABEL: vec_sink_sub_of_const_to_sub2: ; X32: # %bb.0: ; X32-NEXT: psubd %xmm0, %xmm1 -; X32-NEXT: paddd {{\.LCPI.*}}, %xmm1 +; X32-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X32-NEXT: movdqa %xmm1, %xmm0 ; X32-NEXT: retl ; @@ -461,7 +461,7 @@ ; X32-LABEL: vec_sink_sub_from_const_to_sub2: ; X32: # %bb.0: ; X32-NEXT: paddd %xmm1, %xmm0 -; X32-NEXT: psubd {{\.LCPI.*}}, %xmm0 +; X32-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X32-NEXT: retl ; ; X64-LABEL: vec_sink_sub_from_const_to_sub2: diff --git a/llvm/test/CodeGen/X86/slow-pmulld.ll b/llvm/test/CodeGen/X86/slow-pmulld.ll --- a/llvm/test/CodeGen/X86/slow-pmulld.ll +++ b/llvm/test/CodeGen/X86/slow-pmulld.ll @@ -21,7 +21,7 @@ ; CHECK32-LABEL: 
test_mul_v4i32_v4i8: ; CHECK32: # %bb.0: ; CHECK32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; CHECK32-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0 +; CHECK32-NEXT: pmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; CHECK32-NEXT: retl ; ; CHECK64-LABEL: test_mul_v4i32_v4i8: @@ -33,7 +33,7 @@ ; SSE4-32-LABEL: test_mul_v4i32_v4i8: ; SSE4-32: # %bb.0: ; SSE4-32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; SSE4-32-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0 +; SSE4-32-NEXT: pmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; SSE4-32-NEXT: retl ; ; SSE4-64-LABEL: test_mul_v4i32_v4i8: @@ -45,7 +45,7 @@ ; AVX2-32-LABEL: test_mul_v4i32_v4i8: ; AVX2-32: # %bb.0: ; AVX2-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; AVX2-32-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0 +; AVX2-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; AVX2-32-NEXT: retl ; ; AVX2-64-LABEL: test_mul_v4i32_v4i8: @@ -57,7 +57,7 @@ ; AVX512DQ-32-LABEL: test_mul_v4i32_v4i8: ; AVX512DQ-32: # %bb.0: ; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0 +; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; AVX512DQ-32-NEXT: retl ; ; AVX512DQ-64-LABEL: test_mul_v4i32_v4i8: @@ -69,7 +69,7 @@ ; AVX512BW-32-LABEL: test_mul_v4i32_v4i8: ; AVX512BW-32: # %bb.0: ; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; AVX512BW-32-NEXT: retl ; ; AVX512BW-64-LABEL: test_mul_v4i32_v4i8: @@ -168,7 +168,7 @@ ; AVX2-32-LABEL: test_mul_v8i32_v8i8: ; AVX2-32: # %bb.0: ; AVX2-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero -; AVX2-32-NEXT: vpmaddwd {{\.LCPI.*}}, %ymm0, %ymm0 +; AVX2-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; AVX2-32-NEXT: retl ; ; AVX2-64-LABEL: test_mul_v8i32_v8i8: @@ -180,7 +180,7 @@ ; AVX512DQ-32-LABEL: test_mul_v8i32_v8i8: ; AVX512DQ-32: # %bb.0: ; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero -; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI.*}}, %ymm0, %ymm0 +; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; AVX512DQ-32-NEXT: retl ; ; AVX512DQ-64-LABEL: test_mul_v8i32_v8i8: @@ -192,7 +192,7 @@ ; AVX512BW-32-LABEL: test_mul_v8i32_v8i8: ; AVX512BW-32: # %bb.0: ; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero -; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI.*}}, %ymm0, %ymm0 +; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; AVX512BW-32-NEXT: retl ; ; AVX512BW-64-LABEL: test_mul_v8i32_v8i8: @@ -359,7 +359,7 @@ ; AVX512DQ-32-LABEL: test_mul_v16i32_v16i8: ; AVX512DQ-32: # %bb.0: ; AVX512DQ-32-NEXT: vpmovzxbd 
{{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero -; AVX512DQ-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0 +; AVX512DQ-32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0 ; AVX512DQ-32-NEXT: retl ; ; AVX512DQ-64-LABEL: test_mul_v16i32_v16i8: @@ -371,7 +371,7 @@ ; AVX512BW-32-LABEL: test_mul_v16i32_v16i8: ; AVX512BW-32: # %bb.0: ; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero -; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI.*}}, %zmm0, %zmm0 +; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 ; AVX512BW-32-NEXT: retl ; ; AVX512BW-64-LABEL: test_mul_v16i32_v16i8: @@ -383,7 +383,7 @@ ; KNL-32-LABEL: test_mul_v16i32_v16i8: ; KNL-32: # %bb.0: ; KNL-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero -; KNL-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0 +; KNL-32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0 ; KNL-32-NEXT: retl ; ; KNL-64-LABEL: test_mul_v16i32_v16i8: @@ -418,7 +418,7 @@ ; SSE4-32-LABEL: test_mul_v4i32_v4i16: ; SSE4-32: # %bb.0: ; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; SSE4-32-NEXT: pmulld {{\.LCPI.*}}, %xmm0 +; SSE4-32-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; SSE4-32-NEXT: retl ; ; SSE4-64-LABEL: test_mul_v4i32_v4i16: @@ -666,7 +666,7 @@ ; AVX512-32-LABEL: test_mul_v16i32_v16i16: ; AVX512-32: # %bb.0: ; AVX512-32-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero -; AVX512-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0 +; AVX512-32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0 ; AVX512-32-NEXT: retl ; ; AVX512-64-LABEL: test_mul_v16i32_v16i16: @@ -687,7 +687,7 @@ ; CHECK32-LABEL: test_mul_v4i32_v4i8_minsize: ; CHECK32: # %bb.0: ; CHECK32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; CHECK32-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0 +; CHECK32-NEXT: pmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; CHECK32-NEXT: retl ; ; CHECK64-LABEL: test_mul_v4i32_v4i8_minsize: @@ -699,7 +699,7 @@ ; SSE4-32-LABEL: test_mul_v4i32_v4i8_minsize: ; SSE4-32: # %bb.0: ; SSE4-32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; SSE4-32-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0 +; SSE4-32-NEXT: pmaddwd 
{{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; SSE4-32-NEXT: retl ; ; SSE4-64-LABEL: test_mul_v4i32_v4i8_minsize: @@ -711,7 +711,7 @@ ; AVX2-32-LABEL: test_mul_v4i32_v4i8_minsize: ; AVX2-32: # %bb.0: ; AVX2-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; AVX2-32-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0 +; AVX2-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; AVX2-32-NEXT: retl ; ; AVX2-64-LABEL: test_mul_v4i32_v4i8_minsize: @@ -723,7 +723,7 @@ ; AVX512DQ-32-LABEL: test_mul_v4i32_v4i8_minsize: ; AVX512DQ-32: # %bb.0: ; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0 +; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; AVX512DQ-32-NEXT: retl ; ; AVX512DQ-64-LABEL: test_mul_v4i32_v4i8_minsize: @@ -735,7 +735,7 @@ ; AVX512BW-32-LABEL: test_mul_v4i32_v4i8_minsize: ; AVX512BW-32: # %bb.0: ; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero -; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0 +; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; AVX512BW-32-NEXT: retl ; ; AVX512BW-64-LABEL: test_mul_v4i32_v4i8_minsize: @@ -826,7 +826,7 @@ ; AVX2-32-LABEL: test_mul_v8i32_v8i8_minsize: ; AVX2-32: # %bb.0: ; AVX2-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero -; AVX2-32-NEXT: vpmaddwd {{\.LCPI.*}}, %ymm0, %ymm0 +; AVX2-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; AVX2-32-NEXT: retl ; ; AVX2-64-LABEL: test_mul_v8i32_v8i8_minsize: @@ -838,7 +838,7 @@ ; AVX512DQ-32-LABEL: test_mul_v8i32_v8i8_minsize: ; AVX512DQ-32: # %bb.0: ; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero -; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI.*}}, %ymm0, %ymm0 +; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; AVX512DQ-32-NEXT: retl ; ; AVX512DQ-64-LABEL: test_mul_v8i32_v8i8_minsize: @@ -850,7 +850,7 @@ ; AVX512BW-32-LABEL: test_mul_v8i32_v8i8_minsize: ; AVX512BW-32: # %bb.0: ; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero -; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI.*}}, %ymm0, %ymm0 +; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; AVX512BW-32-NEXT: retl ; ; AVX512BW-64-LABEL: test_mul_v8i32_v8i8_minsize: @@ -997,7 +997,7 @@ ; AVX512DQ-32-LABEL: test_mul_v16i32_v16i8_minsize: ; AVX512DQ-32: # %bb.0: ; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero -; AVX512DQ-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0 +; AVX512DQ-32-NEXT: vpmulld 
{{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0 ; AVX512DQ-32-NEXT: retl ; ; AVX512DQ-64-LABEL: test_mul_v16i32_v16i8_minsize: @@ -1009,7 +1009,7 @@ ; AVX512BW-32-LABEL: test_mul_v16i32_v16i8_minsize: ; AVX512BW-32: # %bb.0: ; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero -; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI.*}}, %zmm0, %zmm0 +; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 ; AVX512BW-32-NEXT: retl ; ; AVX512BW-64-LABEL: test_mul_v16i32_v16i8_minsize: @@ -1021,7 +1021,7 @@ ; KNL-32-LABEL: test_mul_v16i32_v16i8_minsize: ; KNL-32: # %bb.0: ; KNL-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero -; KNL-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0 +; KNL-32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0 ; KNL-32-NEXT: retl ; ; KNL-64-LABEL: test_mul_v16i32_v16i8_minsize: @@ -1038,7 +1038,7 @@ ; CHECK32-LABEL: test_mul_v4i32_v4i16_minsize: ; CHECK32: # %bb.0: ; CHECK32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; CHECK32-NEXT: pmulld {{\.LCPI.*}}, %xmm0 +; CHECK32-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; CHECK32-NEXT: retl ; ; CHECK64-LABEL: test_mul_v4i32_v4i16_minsize: @@ -1050,7 +1050,7 @@ ; SSE4-32-LABEL: test_mul_v4i32_v4i16_minsize: ; SSE4-32: # %bb.0: ; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero -; SSE4-32-NEXT: pmulld {{\.LCPI.*}}, %xmm0 +; SSE4-32-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; SSE4-32-NEXT: retl ; ; SSE4-64-LABEL: test_mul_v4i32_v4i16_minsize: @@ -1260,7 +1260,7 @@ ; AVX512-32-LABEL: test_mul_v16i32_v16i16_minsize: ; AVX512-32: # %bb.0: ; AVX512-32-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero -; AVX512-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0 +; AVX512-32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0 ; AVX512-32-NEXT: retl ; ; AVX512-64-LABEL: test_mul_v16i32_v16i16_minsize: diff --git a/llvm/test/CodeGen/X86/sse-fcopysign.ll b/llvm/test/CodeGen/X86/sse-fcopysign.ll --- a/llvm/test/CodeGen/X86/sse-fcopysign.ll +++ b/llvm/test/CodeGen/X86/sse-fcopysign.ll @@ -65,9 +65,9 @@ ; X32: # %bb.0: ; X32-NEXT: pushl %eax ; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero -; X32-NEXT: andps {{\.LCPI.*}}, %xmm0 +; X32-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero -; X32-NEXT: andps {{\.LCPI.*}}, %xmm1 +; X32-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X32-NEXT: orps %xmm0, %xmm1 ; X32-NEXT: movss %xmm1, (%esp) ; X32-NEXT: flds (%esp) @@ -94,9 +94,9 @@ ; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X32-NEXT: addss 20(%ebp), %xmm0 ; X32-NEXT: movsd 
{{.*#+}} xmm1 = mem[0],zero -; X32-NEXT: andps {{\.LCPI.*}}, %xmm1 +; X32-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X32-NEXT: cvtss2sd %xmm0, %xmm0 -; X32-NEXT: andps {{\.LCPI.*}}, %xmm0 +; X32-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X32-NEXT: orps %xmm1, %xmm0 ; X32-NEXT: movlps %xmm0, (%esp) ; X32-NEXT: fldl (%esp) diff --git a/llvm/test/CodeGen/X86/sse-load-ret.ll b/llvm/test/CodeGen/X86/sse-load-ret.ll --- a/llvm/test/CodeGen/X86/sse-load-ret.ll +++ b/llvm/test/CodeGen/X86/sse-load-ret.ll @@ -14,7 +14,7 @@ define double @test2() { ; CHECK-LABEL: test2: ; CHECK: # %bb.0: -; CHECK-NEXT: fldl {{\.LCPI.*}} +; CHECK-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}} ; CHECK-NEXT: retl ret double 1.234560e+03 } diff --git a/llvm/test/CodeGen/X86/sse1-fcopysign.ll b/llvm/test/CodeGen/X86/sse1-fcopysign.ll --- a/llvm/test/CodeGen/X86/sse1-fcopysign.ll +++ b/llvm/test/CodeGen/X86/sse1-fcopysign.ll @@ -7,7 +7,7 @@ ; X86: # %bb.0: ; X86-NEXT: pushl %eax ; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero -; X86-NEXT: andps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-NEXT: movss %xmm0, (%esp) ; X86-NEXT: flds (%esp) ; X86-NEXT: popl %eax @@ -26,7 +26,7 @@ ; X86: # %bb.0: ; X86-NEXT: pushl %eax ; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero -; X86-NEXT: orps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: orps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-NEXT: movss %xmm0, (%esp) ; X86-NEXT: flds (%esp) ; X86-NEXT: popl %eax @@ -43,7 +43,7 @@ define <4 x float> @v4f32_pos(<4 x float> %a, <4 x float> %b) nounwind { ; X86-LABEL: v4f32_pos: ; X86: # %bb.0: -; X86-NEXT: andps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-NEXT: retl ; ; X64-LABEL: v4f32_pos: @@ -57,7 +57,7 @@ define <4 x float> @v4f32_neg(<4 x float> %a, <4 x float> %b) nounwind { ; X86-LABEL: v4f32_neg: ; X86: # %bb.0: -; X86-NEXT: orps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: orps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-NEXT: retl ; ; X64-LABEL: v4f32_neg: @@ -72,8 +72,8 @@ ; X86-LABEL: v4f32_const_mag: ; X86: # %bb.0: ; X86-NEXT: movaps %xmm1, %xmm0 -; X86-NEXT: andps {{\.LCPI.*}}, %xmm0 -; X86-NEXT: orps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-NEXT: orps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-NEXT: retl ; ; X64-LABEL: v4f32_const_mag: diff --git a/llvm/test/CodeGen/X86/sse1.ll b/llvm/test/CodeGen/X86/sse1.ll --- a/llvm/test/CodeGen/X86/sse1.ll +++ b/llvm/test/CodeGen/X86/sse1.ll @@ -180,7 +180,7 @@ ; X86-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero ; X86-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] ; X86-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0] -; X86-NEXT: andps {{\.LCPI.*}}, %xmm2 +; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm2 ; X86-NEXT: movaps %xmm2, (%eax) ; X86-NEXT: addl $16, %esp ; X86-NEXT: popl %esi @@ -238,7 +238,7 @@ define <2 x float> @PR31672() #0 { ; X86-LABEL: PR31672: ; X86: # %bb.0: -; X86-NEXT: sqrtps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: sqrtps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-NEXT: retl ; ; X64-LABEL: PR31672: diff --git a/llvm/test/CodeGen/X86/sse2.ll b/llvm/test/CodeGen/X86/sse2.ll --- a/llvm/test/CodeGen/X86/sse2.ll +++ b/llvm/test/CodeGen/X86/sse2.ll @@ -675,7 +675,7 @@ define <4 x i32> @PR19721(<4 x i32> %i) { ; X86-SSE-LABEL: PR19721: ; X86-SSE: # %bb.0: -; X86-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: retl ; ; AVX-LABEL: PR19721: diff --git a/llvm/test/CodeGen/X86/sse3.ll b/llvm/test/CodeGen/X86/sse3.ll --- a/llvm/test/CodeGen/X86/sse3.ll +++ 
b/llvm/test/CodeGen/X86/sse3.ll @@ -397,7 +397,7 @@ ; X86-LABEL: t17: ; X86: # %bb.0: # %entry ; X86-NEXT: pshufd {{.*#+}} xmm0 = mem[0,1,0,1] -; X86-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-NEXT: retl ; ; X64-LABEL: t17: diff --git a/llvm/test/CodeGen/X86/uint64-to-float.ll b/llvm/test/CodeGen/X86/uint64-to-float.ll --- a/llvm/test/CodeGen/X86/uint64-to-float.ll +++ b/llvm/test/CodeGen/X86/uint64-to-float.ll @@ -18,7 +18,7 @@ ; X86-NEXT: movlps %xmm0, {{[0-9]+}}(%esp) ; X86-NEXT: shrl $31, %eax ; X86-NEXT: fildll {{[0-9]+}}(%esp) -; X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4) +; X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4) ; X86-NEXT: fstps {{[0-9]+}}(%esp) ; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-NEXT: movss %xmm0, (%esp) diff --git a/llvm/test/CodeGen/X86/uint_to_fp-2.ll b/llvm/test/CodeGen/X86/uint_to_fp-2.ll --- a/llvm/test/CodeGen/X86/uint_to_fp-2.ll +++ b/llvm/test/CodeGen/X86/uint_to_fp-2.ll @@ -7,8 +7,8 @@ ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: pushl %eax ; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero -; CHECK-NEXT: orpd {{\.LCPI.*}}, %xmm0 -; CHECK-NEXT: subsd {{\.LCPI.*}}, %xmm0 +; CHECK-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 +; CHECK-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; CHECK-NEXT: cvtsd2ss %xmm0, %xmm0 ; CHECK-NEXT: movss %xmm0, (%esp) ; CHECK-NEXT: flds (%esp) @@ -26,8 +26,8 @@ ; CHECK-NEXT: pushl %eax ; CHECK-NEXT: xorps %xmm1, %xmm1 ; CHECK-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3] -; CHECK-NEXT: orps {{\.LCPI.*}}, %xmm1 -; CHECK-NEXT: subsd {{\.LCPI.*}}, %xmm1 +; CHECK-NEXT: orps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 +; CHECK-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; CHECK-NEXT: xorps %xmm0, %xmm0 ; CHECK-NEXT: cvtsd2ss %xmm1, %xmm0 ; CHECK-NEXT: movss %xmm0, (%esp) diff --git a/llvm/test/CodeGen/X86/uint_to_fp-3.ll b/llvm/test/CodeGen/X86/uint_to_fp-3.ll --- a/llvm/test/CodeGen/X86/uint_to_fp-3.ll +++ b/llvm/test/CodeGen/X86/uint_to_fp-3.ll @@ -9,13 +9,13 @@ define <4 x float> @mask_ucvt_4i32_4f32(<4 x i32> %a) { ; X32-SSE-LABEL: mask_ucvt_4i32_4f32: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0 +; X32-SSE-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X32-SSE-NEXT: cvtdq2ps %xmm0, %xmm0 ; X32-SSE-NEXT: retl ; ; X32-AVX-LABEL: mask_ucvt_4i32_4f32: ; X32-AVX: # %bb.0: -; X32-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 +; X32-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X32-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0 ; X32-AVX-NEXT: retl ; @@ -38,7 +38,7 @@ define <4 x double> @mask_ucvt_4i32_4f64(<4 x i32> %a) { ; X32-SSE-LABEL: mask_ucvt_4i32_4f64: ; X32-SSE: # %bb.0: -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X32-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X32-SSE-NEXT: cvtdq2pd %xmm0, %xmm2 ; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] ; X32-SSE-NEXT: cvtdq2pd %xmm0, %xmm1 @@ -47,7 +47,7 @@ ; ; X32-AVX-LABEL: mask_ucvt_4i32_4f64: ; X32-AVX: # %bb.0: -; X32-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 +; X32-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X32-AVX-NEXT: vcvtdq2pd %xmm0, %ymm0 ; X32-AVX-NEXT: retl ; @@ -80,7 +80,7 @@ ; X32-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] ; X32-SSE-NEXT: psrld $16, %xmm0 ; X32-SSE-NEXT: cvtdq2ps %xmm0, %xmm0 -; X32-SSE-NEXT: mulps {{\.LCPI.*}}, %xmm0 +; X32-SSE-NEXT: mulps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X32-SSE-NEXT: retl ; ; X32-AVX-LABEL: lshr_truncate_mask_ucvt_4i64_4f32: @@ -90,7 +90,7 @@ ; X32-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],mem[0,2] ; X32-AVX-NEXT: vpsrld $16, %xmm0, %xmm0 ; 
X32-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0 -; X32-AVX-NEXT: vmulps {{\.LCPI.*}}, %xmm0, %xmm0 +; X32-AVX-NEXT: vmulps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X32-AVX-NEXT: retl ; ; X64-SSE-LABEL: lshr_truncate_mask_ucvt_4i64_4f32: diff --git a/llvm/test/CodeGen/X86/urem-power-of-two.ll b/llvm/test/CodeGen/X86/urem-power-of-two.ll --- a/llvm/test/CodeGen/X86/urem-power-of-two.ll +++ b/llvm/test/CodeGen/X86/urem-power-of-two.ll @@ -106,7 +106,7 @@ define <4 x i32> @vec_const_uniform_pow_2(<4 x i32> %x) { ; X86-LABEL: vec_const_uniform_pow_2: ; X86: # %bb.0: -; X86-NEXT: andps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-NEXT: retl ; ; X64-LABEL: vec_const_uniform_pow_2: @@ -120,7 +120,7 @@ define <4 x i32> @vec_const_nonuniform_pow_2(<4 x i32> %x) { ; X86-LABEL: vec_const_nonuniform_pow_2: ; X86: # %bb.0: -; X86-NEXT: andps {{\.LCPI.*}}, %xmm0 +; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-NEXT: retl ; ; X64-LABEL: vec_const_nonuniform_pow_2: diff --git a/llvm/test/CodeGen/X86/var-permute-256.ll b/llvm/test/CodeGen/X86/var-permute-256.ll --- a/llvm/test/CodeGen/X86/var-permute-256.ll +++ b/llvm/test/CodeGen/X86/var-permute-256.ll @@ -34,7 +34,7 @@ ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; AVX1-NEXT: vpermilpd %ymm4, %ymm0, %ymm0 ; AVX1-NEXT: vpcmpgtq {{.*}}(%rip), %xmm3, %xmm3 -; AVX1-NEXT: vpcmpgtq {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpcmpgtq {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 ; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: retq @@ -94,7 +94,7 @@ ; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 ; AVX1-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 -; AVX1-NEXT: vpcmpgtd {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpcmpgtd {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 ; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: retq @@ -454,7 +454,7 @@ ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; AVX1-NEXT: vpermilpd %ymm4, %ymm0, %ymm0 ; AVX1-NEXT: vpcmpgtq {{.*}}(%rip), %xmm3, %xmm3 -; AVX1-NEXT: vpcmpgtq {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpcmpgtq {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 ; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: retq @@ -514,7 +514,7 @@ ; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 ; AVX1-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 -; AVX1-NEXT: vpcmpgtd {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpcmpgtd {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 ; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: retq @@ -576,7 +576,7 @@ ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm3 ; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm0 ; AVX1-NEXT: vpcmpgtq {{.*}}(%rip), %xmm2, %xmm2 -; AVX1-NEXT: vpcmpgtq {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpcmpgtq {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 ; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm2 ; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0 @@ -638,7 +638,7 @@ ; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 ; AVX1-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 -; AVX1-NEXT: vpcmpgtd {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpcmpgtd {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, 
%xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 ; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: retq @@ -997,7 +997,7 @@ ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm3 ; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm0 ; AVX1-NEXT: vpcmpgtq {{.*}}(%rip), %xmm2, %xmm2 -; AVX1-NEXT: vpcmpgtq {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpcmpgtq {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 ; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm2 ; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0 @@ -1059,7 +1059,7 @@ ; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 ; AVX1-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1 -; AVX1-NEXT: vpcmpgtd {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpcmpgtd {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1 ; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0 ; AVX1-NEXT: retq diff --git a/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll b/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll --- a/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll +++ b/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll @@ -2031,7 +2031,7 @@ ; SSE-32-LABEL: strict_vector_fptosi_v2f64_to_v2i8: ; SSE-32: # %bb.0: ; SSE-32-NEXT: cvttpd2dq %xmm0, %xmm0 -; SSE-32-NEXT: andpd {{\.LCPI.*}}, %xmm0 +; SSE-32-NEXT: andpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; SSE-32-NEXT: packuswb %xmm0, %xmm0 ; SSE-32-NEXT: packuswb %xmm0, %xmm0 ; SSE-32-NEXT: retl @@ -2082,7 +2082,7 @@ ; SSE-32-LABEL: strict_vector_fptoui_v2f64_to_v2i8: ; SSE-32: # %bb.0: ; SSE-32-NEXT: cvttpd2dq %xmm0, %xmm0 -; SSE-32-NEXT: andpd {{\.LCPI.*}}, %xmm0 +; SSE-32-NEXT: andpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; SSE-32-NEXT: packuswb %xmm0, %xmm0 ; SSE-32-NEXT: packuswb %xmm0, %xmm0 ; SSE-32-NEXT: retl @@ -2134,7 +2134,7 @@ ; SSE-32: # %bb.0: ; SSE-32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero ; SSE-32-NEXT: cvttps2dq %xmm0, %xmm0 -; SSE-32-NEXT: pand {{\.LCPI.*}}, %xmm0 +; SSE-32-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; SSE-32-NEXT: packuswb %xmm0, %xmm0 ; SSE-32-NEXT: packuswb %xmm0, %xmm0 ; SSE-32-NEXT: retl @@ -2192,7 +2192,7 @@ ; SSE-32: # %bb.0: ; SSE-32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero ; SSE-32-NEXT: cvttps2dq %xmm0, %xmm0 -; SSE-32-NEXT: pand {{\.LCPI.*}}, %xmm0 +; SSE-32-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; SSE-32-NEXT: packuswb %xmm0, %xmm0 ; SSE-32-NEXT: packuswb %xmm0, %xmm0 ; SSE-32-NEXT: retl @@ -3037,7 +3037,7 @@ ; SSE-32-NEXT: movaps %xmm0, %xmm3 ; SSE-32-NEXT: cmpltps %xmm2, %xmm3 ; SSE-32-NEXT: movaps %xmm3, %xmm1 -; SSE-32-NEXT: andnps {{\.LCPI.*}}, %xmm1 +; SSE-32-NEXT: andnps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; SSE-32-NEXT: andnps %xmm2, %xmm3 ; SSE-32-NEXT: subps %xmm3, %xmm0 ; SSE-32-NEXT: cvttps2dq %xmm0, %xmm0 diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll --- a/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll +++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll @@ -278,14 +278,14 @@ ; SSE-32-NEXT: movd %xmm1, %eax ; SSE-32-NEXT: shrl $31, %eax ; SSE-32-NEXT: fildll {{[0-9]+}}(%esp) -; SSE-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) +; SSE-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4) ; SSE-32-NEXT: fstps (%esp) ; SSE-32-NEXT: wait ; SSE-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1] ; SSE-32-NEXT: movd %xmm0, %eax ; SSE-32-NEXT: shrl $31, %eax ; SSE-32-NEXT: fildll {{[0-9]+}}(%esp) -; SSE-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) +; SSE-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4) ; SSE-32-NEXT: 
fstps {{[0-9]+}}(%esp) ; SSE-32-NEXT: wait ; SSE-32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero @@ -347,14 +347,14 @@ ; SSE41-32-NEXT: movd %xmm1, %eax ; SSE41-32-NEXT: shrl $31, %eax ; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp) -; SSE41-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) +; SSE41-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4) ; SSE41-32-NEXT: fstps (%esp) ; SSE41-32-NEXT: wait ; SSE41-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1] ; SSE41-32-NEXT: movd %xmm0, %eax ; SSE41-32-NEXT: shrl $31, %eax ; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp) -; SSE41-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) +; SSE41-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4) ; SSE41-32-NEXT: fstps {{[0-9]+}}(%esp) ; SSE41-32-NEXT: wait ; SSE41-32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero @@ -415,13 +415,13 @@ ; AVX-32-NEXT: vextractps $1, %xmm0, %eax ; AVX-32-NEXT: shrl $31, %eax ; AVX-32-NEXT: fildll {{[0-9]+}}(%esp) -; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) +; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4) ; AVX-32-NEXT: fstps {{[0-9]+}}(%esp) ; AVX-32-NEXT: wait ; AVX-32-NEXT: vextractps $3, %xmm0, %eax ; AVX-32-NEXT: shrl $31, %eax ; AVX-32-NEXT: fildll {{[0-9]+}}(%esp) -; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4) +; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4) ; AVX-32-NEXT: fstps (%esp) ; AVX-32-NEXT: wait ; AVX-32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero @@ -526,7 +526,7 @@ define <4 x float> @uitofp_v4i1_v4f32(<4 x i1> %x) #0 { ; SSE-32-LABEL: uitofp_v4i1_v4f32: ; SSE-32: # %bb.0: -; SSE-32-NEXT: andps {{\.LCPI.*}}, %xmm0 +; SSE-32-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; SSE-32-NEXT: cvtdq2ps %xmm0, %xmm0 ; SSE-32-NEXT: retl ; @@ -538,7 +538,7 @@ ; ; SSE41-32-LABEL: uitofp_v4i1_v4f32: ; SSE41-32: # %bb.0: -; SSE41-32-NEXT: andps {{\.LCPI.*}}, %xmm0 +; SSE41-32-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; SSE41-32-NEXT: cvtdq2ps %xmm0, %xmm0 ; SSE41-32-NEXT: retl ; @@ -550,7 +550,7 @@ ; ; AVX1-32-LABEL: uitofp_v4i1_v4f32: ; AVX1-32: # %bb.0: -; AVX1-32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 +; AVX1-32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; AVX1-32-NEXT: vcvtdq2ps %xmm0, %xmm0 ; AVX1-32-NEXT: retl ; @@ -569,7 +569,7 @@ ; ; AVX512VL-32-LABEL: uitofp_v4i1_v4f32: ; AVX512VL-32: # %bb.0: -; AVX512VL-32-NEXT: vpandd {{\.LCPI.*}}{1to4}, %xmm0, %xmm0 +; AVX512VL-32-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0 ; AVX512VL-32-NEXT: vcvtdq2ps %xmm0, %xmm0 ; AVX512VL-32-NEXT: retl ; @@ -588,7 +588,7 @@ ; ; AVX512DQVL-32-LABEL: uitofp_v4i1_v4f32: ; AVX512DQVL-32: # %bb.0: -; AVX512DQVL-32-NEXT: vandps {{\.LCPI.*}}{1to4}, %xmm0, %xmm0 +; AVX512DQVL-32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0 ; AVX512DQVL-32-NEXT: vcvtdq2ps %xmm0, %xmm0 ; AVX512DQVL-32-NEXT: retl ; @@ -737,10 +737,10 @@ ; SSE-32: # %bb.0: ; SSE-32-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535] ; SSE-32-NEXT: pand %xmm0, %xmm1 -; SSE-32-NEXT: por {{\.LCPI.*}}, %xmm1 +; SSE-32-NEXT: por {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; SSE-32-NEXT: psrld $16, %xmm0 -; SSE-32-NEXT: por {{\.LCPI.*}}, %xmm0 -; SSE-32-NEXT: subps {{\.LCPI.*}}, %xmm0 +; SSE-32-NEXT: por {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 +; SSE-32-NEXT: subps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; SSE-32-NEXT: addps %xmm1, %xmm0 ; SSE-32-NEXT: retl ; @@ -759,10 +759,10 @@ ; SSE41-32: # %bb.0: ; SSE41-32-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535] ; SSE41-32-NEXT: pand %xmm0, %xmm1 -; SSE41-32-NEXT: por {{\.LCPI.*}}, %xmm1 +; SSE41-32-NEXT: por {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; SSE41-32-NEXT: psrld $16, %xmm0 -; SSE41-32-NEXT: por 
{{\.LCPI.*}}, %xmm0
-; SSE41-32-NEXT: subps {{\.LCPI.*}}, %xmm0
+; SSE41-32-NEXT: por {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; SSE41-32-NEXT: subps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; SSE41-32-NEXT: addps %xmm1, %xmm0
 ; SSE41-32-NEXT: retl
 ;
@@ -782,7 +782,7 @@
 ; AVX1-32-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
 ; AVX1-32-NEXT: vpsrld $16, %xmm0, %xmm0
 ; AVX1-32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; AVX1-32-NEXT: vsubps {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX1-32-NEXT: vsubps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; AVX1-32-NEXT: vaddps %xmm0, %xmm1, %xmm0
 ; AVX1-32-NEXT: retl
 ;
@@ -860,7 +860,7 @@
 ; SSE-32-LABEL: uitofp_v2i1_v2f64:
 ; SSE-32: # %bb.0:
 ; SSE-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-32-NEXT: pand {{\.LCPI.*}}, %xmm0
+; SSE-32-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; SSE-32-NEXT: cvtdq2pd %xmm0, %xmm0
 ; SSE-32-NEXT: retl
 ;
@@ -874,7 +874,7 @@
 ; SSE41-32-LABEL: uitofp_v2i1_v2f64:
 ; SSE41-32: # %bb.0:
 ; SSE41-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE41-32-NEXT: pand {{\.LCPI.*}}, %xmm0
+; SSE41-32-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; SSE41-32-NEXT: cvtdq2pd %xmm0, %xmm0
 ; SSE41-32-NEXT: retl
 ;
@@ -888,7 +888,7 @@
 ; AVX1-32-LABEL: uitofp_v2i1_v2f64:
 ; AVX1-32: # %bb.0:
 ; AVX1-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX1-32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; AVX1-32-NEXT: vcvtdq2pd %xmm0, %xmm0
 ; AVX1-32-NEXT: retl
 ;
@@ -910,7 +910,7 @@
 ; AVX512VL-32-LABEL: uitofp_v2i1_v2f64:
 ; AVX512VL-32: # %bb.0:
 ; AVX512VL-32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512VL-32-NEXT: vpandd {{\.LCPI.*}}{1to4}, %xmm0, %xmm0
+; AVX512VL-32-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
 ; AVX512VL-32-NEXT: vcvtdq2pd %xmm0, %xmm0
 ; AVX512VL-32-NEXT: retl
 ;
@@ -932,7 +932,7 @@
 ; AVX512DQVL-32-LABEL: uitofp_v2i1_v2f64:
 ; AVX512DQVL-32: # %bb.0:
 ; AVX512DQVL-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512DQVL-32-NEXT: vandps {{\.LCPI.*}}{1to4}, %xmm0, %xmm0
+; AVX512DQVL-32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
 ; AVX512DQVL-32-NEXT: vcvtdq2pd %xmm0, %xmm0
 ; AVX512DQVL-32-NEXT: retl
 ;
@@ -1276,14 +1276,14 @@
 ; SSE-32-NEXT: movd %xmm1, %eax
 ; SSE-32-NEXT: shrl $31, %eax
 ; SSE-32-NEXT: fildll {{[0-9]+}}(%esp)
-; SSE-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; SSE-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; SSE-32-NEXT: fstpl {{[0-9]+}}(%esp)
 ; SSE-32-NEXT: wait
 ; SSE-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
 ; SSE-32-NEXT: movd %xmm0, %eax
 ; SSE-32-NEXT: shrl $31, %eax
 ; SSE-32-NEXT: fildll {{[0-9]+}}(%esp)
-; SSE-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; SSE-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; SSE-32-NEXT: fstpl (%esp)
 ; SSE-32-NEXT: wait
 ; SSE-32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -1344,14 +1344,14 @@
 ; SSE41-32-NEXT: movd %xmm1, %eax
 ; SSE41-32-NEXT: shrl $31, %eax
 ; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp)
-; SSE41-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; SSE41-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; SSE41-32-NEXT: fstpl {{[0-9]+}}(%esp)
 ; SSE41-32-NEXT: wait
 ; SSE41-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
 ; SSE41-32-NEXT: movd %xmm0, %eax
 ; SSE41-32-NEXT: shrl $31, %eax
 ; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp)
-; SSE41-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; SSE41-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; SSE41-32-NEXT: fstpl (%esp)
 ; SSE41-32-NEXT: wait
 ; SSE41-32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -1411,13 +1411,13 @@
 ; AVX-32-NEXT: vextractps $1, %xmm0, %eax
 ; AVX-32-NEXT: shrl $31, %eax
 ; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; AVX-32-NEXT: fstpl {{[0-9]+}}(%esp)
 ; AVX-32-NEXT: wait
 ; AVX-32-NEXT: vextractps $3, %xmm0, %eax
 ; AVX-32-NEXT: shrl $31, %eax
 ; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; AVX-32-NEXT: fstpl (%esp)
 ; AVX-32-NEXT: wait
 ; AVX-32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
--- a/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
@@ -94,7 +94,7 @@
 define <8 x float> @uitofp_v8i1_v8f32(<8 x i1> %x) #0 {
 ; AVX1-32-LABEL: uitofp_v8i1_v8f32:
 ; AVX1-32: # %bb.0:
-; AVX1-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX1-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; AVX1-32-NEXT: vpxor %xmm1, %xmm1, %xmm1
 ; AVX1-32-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; AVX1-32-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -114,7 +114,7 @@
 ;
 ; AVX2-32-LABEL: uitofp_v8i1_v8f32:
 ; AVX2-32: # %bb.0:
-; AVX2-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX2-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; AVX2-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX2-32-NEXT: vcvtdq2ps %ymm0, %ymm0
 ; AVX2-32-NEXT: retl
@@ -128,7 +128,7 @@
 ;
 ; AVX512F-32-LABEL: uitofp_v8i1_v8f32:
 ; AVX512F-32: # %bb.0:
-; AVX512F-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX512F-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; AVX512F-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX512F-32-NEXT: vcvtdq2ps %ymm0, %ymm0
 ; AVX512F-32-NEXT: retl
@@ -142,7 +142,7 @@
 ;
 ; AVX512VL-32-LABEL: uitofp_v8i1_v8f32:
 ; AVX512VL-32: # %bb.0:
-; AVX512VL-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX512VL-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; AVX512VL-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX512VL-32-NEXT: vcvtdq2ps %ymm0, %ymm0
 ; AVX512VL-32-NEXT: retl
@@ -156,7 +156,7 @@
 ;
 ; AVX512DQ-32-LABEL: uitofp_v8i1_v8f32:
 ; AVX512DQ-32: # %bb.0:
-; AVX512DQ-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX512DQ-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; AVX512DQ-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX512DQ-32-NEXT: vcvtdq2ps %ymm0, %ymm0
 ; AVX512DQ-32-NEXT: retl
@@ -170,7 +170,7 @@
 ;
 ; AVX512DQVL-32-LABEL: uitofp_v8i1_v8f32:
 ; AVX512DQVL-32: # %bb.0:
-; AVX512DQVL-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX512DQVL-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; AVX512DQVL-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; AVX512DQVL-32-NEXT: vcvtdq2ps %ymm0, %ymm0
 ; AVX512DQVL-32-NEXT: retl
@@ -386,8 +386,8 @@
 ; AVX1-32-NEXT: vpsrld $16, %xmm2, %xmm2
 ; AVX1-32-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
 ; AVX1-32-NEXT: vcvtdq2ps %ymm1, %ymm1
-; AVX1-32-NEXT: vmulps {{\.LCPI.*}}, %ymm1, %ymm1
-; AVX1-32-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX1-32-NEXT: vmulps {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1
+; AVX1-32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
 ; AVX1-32-NEXT: vcvtdq2ps %ymm0, %ymm0
 ; AVX1-32-NEXT: vaddps %ymm0, %ymm1, %ymm0
 ; AVX1-32-NEXT: retl
@@ -462,7 +462,7 @@
 define <4 x double> @uitofp_v4i1_v4f64(<4 x i1> %x) #0 {
 ; AVX1-32-LABEL: uitofp_v4i1_v4f64:
 ; AVX1-32: # %bb.0:
-; AVX1-32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX1-32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; AVX1-32-NEXT: vcvtdq2pd %xmm0, %ymm0
 ; AVX1-32-NEXT: retl
 ;
@@ -488,7 +488,7 @@
 ;
 ; AVX512VL-32-LABEL: uitofp_v4i1_v4f64:
 ; AVX512VL-32: # %bb.0:
-; AVX512VL-32-NEXT: vpandd {{\.LCPI.*}}{1to4}, %xmm0, %xmm0
+; AVX512VL-32-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
 ; AVX512VL-32-NEXT: vcvtdq2pd %xmm0, %ymm0
 ; AVX512VL-32-NEXT: retl
 ;
@@ -507,7 +507,7 @@
 ;
 ; AVX512DQVL-32-LABEL: uitofp_v4i1_v4f64:
 ; AVX512DQVL-32: # %bb.0:
-; AVX512DQVL-32-NEXT: vandps {{\.LCPI.*}}{1to4}, %xmm0, %xmm0
+; AVX512DQVL-32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
 ; AVX512DQVL-32-NEXT: vcvtdq2pd %xmm0, %ymm0
 ; AVX512DQVL-32-NEXT: retl
 ;
@@ -767,25 +767,25 @@
 ; AVX-32-NEXT: vextractps $1, %xmm0, %eax
 ; AVX-32-NEXT: shrl $31, %eax
 ; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; AVX-32-NEXT: fstpl (%esp)
 ; AVX-32-NEXT: wait
 ; AVX-32-NEXT: vextractps $3, %xmm0, %eax
 ; AVX-32-NEXT: shrl $31, %eax
 ; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; AVX-32-NEXT: fstpl {{[0-9]+}}(%esp)
 ; AVX-32-NEXT: wait
 ; AVX-32-NEXT: vextractps $1, %xmm1, %eax
 ; AVX-32-NEXT: shrl $31, %eax
 ; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; AVX-32-NEXT: fstpl {{[0-9]+}}(%esp)
 ; AVX-32-NEXT: wait
 ; AVX-32-NEXT: vextractps $3, %xmm1, %eax
 ; AVX-32-NEXT: shrl $31, %eax
 ; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; AVX-32-NEXT: fstpl {{[0-9]+}}(%esp)
 ; AVX-32-NEXT: wait
 ; AVX-32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -1051,25 +1051,25 @@
 ; AVX-32-NEXT: vextractps $1, %xmm0, %eax
 ; AVX-32-NEXT: shrl $31, %eax
 ; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; AVX-32-NEXT: fstps (%esp)
 ; AVX-32-NEXT: wait
 ; AVX-32-NEXT: vextractps $3, %xmm0, %eax
 ; AVX-32-NEXT: shrl $31, %eax
 ; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; AVX-32-NEXT: fstps {{[0-9]+}}(%esp)
 ; AVX-32-NEXT: wait
 ; AVX-32-NEXT: vextractps $1, %xmm1, %eax
 ; AVX-32-NEXT: shrl $31, %eax
 ; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; AVX-32-NEXT: fstps {{[0-9]+}}(%esp)
 ; AVX-32-NEXT: wait
 ; AVX-32-NEXT: vextractps $3, %xmm1, %eax
 ; AVX-32-NEXT: shrl $31, %eax
 ; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; AVX-32-NEXT: fstps {{[0-9]+}}(%esp)
 ; AVX-32-NEXT: wait
 ; AVX-32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-512.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-512.ll
--- a/llvm/test/CodeGen/X86/vec-strict-inttofp-512.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-512.ll
@@ -42,7 +42,7 @@
 define <16 x float> @uitofp_v16i1_v16f32(<16 x i1> %x) #0 {
 ; NODQ-32-LABEL: uitofp_v16i1_v16f32:
 ; NODQ-32: # %bb.0:
-; NODQ-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; NODQ-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; NODQ-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
 ; NODQ-32-NEXT: vcvtdq2ps %zmm0, %zmm0
 ; NODQ-32-NEXT: retl
@@ -56,7 +56,7 @@
 ;
 ; DQ-32-LABEL: uitofp_v16i1_v16f32:
 ; DQ-32: # %bb.0:
-; DQ-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; DQ-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; DQ-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
 ; DQ-32-NEXT: vcvtdq2ps %zmm0, %zmm0
 ; DQ-32-NEXT: retl
@@ -160,7 +160,7 @@
 define <8 x double> @uitofp_v8i1_v8f64(<8 x i1> %x) #0 {
 ; NODQ-32-LABEL: uitofp_v8i1_v8f64:
 ; NODQ-32: # %bb.0:
-; NODQ-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; NODQ-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; NODQ-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; NODQ-32-NEXT: vcvtdq2pd %ymm0, %zmm0
 ; NODQ-32-NEXT: retl
@@ -174,7 +174,7 @@
 ;
 ; DQ-32-LABEL: uitofp_v8i1_v8f64:
 ; DQ-32: # %bb.0:
-; DQ-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; DQ-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; DQ-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; DQ-32-NEXT: vcvtdq2pd %ymm0, %zmm0
 ; DQ-32-NEXT: retl
@@ -387,49 +387,49 @@
 ; NODQ-32-NEXT: vextractps $1, %xmm2, %eax
 ; NODQ-32-NEXT: shrl $31, %eax
 ; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; NODQ-32-NEXT: fstpl {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: wait
 ; NODQ-32-NEXT: vextractps $3, %xmm2, %eax
 ; NODQ-32-NEXT: shrl $31, %eax
 ; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; NODQ-32-NEXT: fstpl {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: wait
 ; NODQ-32-NEXT: vextractps $1, %xmm3, %eax
 ; NODQ-32-NEXT: shrl $31, %eax
 ; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; NODQ-32-NEXT: fstpl {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: wait
 ; NODQ-32-NEXT: vextractps $3, %xmm3, %eax
 ; NODQ-32-NEXT: shrl $31, %eax
 ; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; NODQ-32-NEXT: fstpl {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: wait
 ; NODQ-32-NEXT: vextractps $1, %xmm0, %eax
 ; NODQ-32-NEXT: shrl $31, %eax
 ; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; NODQ-32-NEXT: fstpl (%esp)
 ; NODQ-32-NEXT: wait
 ; NODQ-32-NEXT: vextractps $3, %xmm0, %eax
 ; NODQ-32-NEXT: shrl $31, %eax
 ; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; NODQ-32-NEXT: fstpl {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: wait
 ; NODQ-32-NEXT: vextractps $1, %xmm1, %eax
 ; NODQ-32-NEXT: shrl $31, %eax
 ; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; NODQ-32-NEXT: fstpl {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: wait
 ; NODQ-32-NEXT: vextractps $3, %xmm1, %eax
 ; NODQ-32-NEXT: shrl $31, %eax
 ; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; NODQ-32-NEXT: fstpl {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: wait
 ; NODQ-32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -612,49 +612,49 @@
 ; NODQ-32-NEXT: vextractps $1, %xmm0, %eax
 ; NODQ-32-NEXT: shrl $31, %eax
 ; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; NODQ-32-NEXT: fstps (%esp)
 ; NODQ-32-NEXT: wait
 ; NODQ-32-NEXT: vextractps $3, %xmm0, %eax
 ; NODQ-32-NEXT: shrl $31, %eax
 ; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; NODQ-32-NEXT: fstps {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: wait
 ; NODQ-32-NEXT: vextractps $1, %xmm3, %eax
 ; NODQ-32-NEXT: shrl $31, %eax
 ; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; NODQ-32-NEXT: fstps {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: wait
 ; NODQ-32-NEXT: vextractps $3, %xmm3, %eax
 ; NODQ-32-NEXT: shrl $31, %eax
 ; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; NODQ-32-NEXT: fstps {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: wait
 ; NODQ-32-NEXT: vextractps $1, %xmm2, %eax
 ; NODQ-32-NEXT: shrl $31, %eax
 ; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; NODQ-32-NEXT: fstps {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: wait
 ; NODQ-32-NEXT: vextractps $3, %xmm2, %eax
 ; NODQ-32-NEXT: shrl $31, %eax
 ; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; NODQ-32-NEXT: fstps {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: wait
 ; NODQ-32-NEXT: vextractps $1, %xmm1, %eax
 ; NODQ-32-NEXT: shrl $31, %eax
 ; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; NODQ-32-NEXT: fstps {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: wait
 ; NODQ-32-NEXT: vextractps $3, %xmm1, %eax
 ; NODQ-32-NEXT: shrl $31, %eax
 ; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
 ; NODQ-32-NEXT: fstps {{[0-9]+}}(%esp)
 ; NODQ-32-NEXT: wait
 ; NODQ-32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
diff --git a/llvm/test/CodeGen/X86/vec_fabs.ll b/llvm/test/CodeGen/X86/vec_fabs.ll
--- a/llvm/test/CodeGen/X86/vec_fabs.ll
+++ b/llvm/test/CodeGen/X86/vec_fabs.ll
@@ -9,7 +9,7 @@
 define <2 x double> @fabs_v2f64(<2 x double> %p) {
 ; X86-LABEL: fabs_v2f64:
 ; X86: # %bb.0:
-; X86-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: fabs_v2f64:
@@ -24,17 +24,17 @@
 define <4 x float> @fabs_v4f32(<4 x float> %p) {
 ; X86-AVX-LABEL: fabs_v4f32:
 ; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT: retl
 ;
 ; X86-AVX512VL-LABEL: fabs_v4f32:
 ; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vpandd {{\.LCPI.*}}{1to4}, %xmm0, %xmm0
+; X86-AVX512VL-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
 ; X86-AVX512VL-NEXT: retl
 ;
 ; X86-AVX512VLDQ-LABEL: fabs_v4f32:
 ; X86-AVX512VLDQ: # %bb.0:
-; X86-AVX512VLDQ-NEXT: vandps {{\.LCPI.*}}{1to4}, %xmm0, %xmm0
+; X86-AVX512VLDQ-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
 ; X86-AVX512VLDQ-NEXT: retl
 ;
 ; X64-AVX-LABEL: fabs_v4f32:
@@ -59,17 +59,17 @@
 define <4 x double> @fabs_v4f64(<4 x double> %p) {
 ; X86-AVX-LABEL: fabs_v4f64:
 ; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
 ; X86-AVX-NEXT: retl
 ;
 ; X86-AVX512VL-LABEL: fabs_v4f64:
 ; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vpandq {{\.LCPI.*}}{1to4}, %ymm0, %ymm0
+; X86-AVX512VL-NEXT: vpandq {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %ymm0, %ymm0
 ; X86-AVX512VL-NEXT: retl
 ;
 ; X86-AVX512VLDQ-LABEL: fabs_v4f64:
 ; X86-AVX512VLDQ: # %bb.0:
-; X86-AVX512VLDQ-NEXT: vandpd {{\.LCPI.*}}{1to4}, %ymm0, %ymm0
+; X86-AVX512VLDQ-NEXT: vandpd {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %ymm0, %ymm0
 ; X86-AVX512VLDQ-NEXT: retl
 ;
 ; X64-AVX-LABEL: fabs_v4f64:
@@ -94,17 +94,17 @@
 define <8 x float> @fabs_v8f32(<8 x float> %p) {
 ; X86-AVX-LABEL: fabs_v8f32:
 ; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
 ; X86-AVX-NEXT: retl
 ;
 ; X86-AVX512VL-LABEL: fabs_v8f32:
 ; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vpandd {{\.LCPI.*}}{1to8}, %ymm0, %ymm0
+; X86-AVX512VL-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
 ; X86-AVX512VL-NEXT: retl
 ;
 ; X86-AVX512VLDQ-LABEL: fabs_v8f32:
 ; X86-AVX512VLDQ: # %bb.0:
-; X86-AVX512VLDQ-NEXT: vandps {{\.LCPI.*}}{1to8}, %ymm0, %ymm0
+; X86-AVX512VLDQ-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
 ; X86-AVX512VLDQ-NEXT: retl
 ;
 ; X64-AVX-LABEL: fabs_v8f32:
@@ -136,12 +136,12 @@
 ;
 ; X86-AVX512VL-LABEL: fabs_v8f64:
 ; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vpandq {{\.LCPI.*}}{1to8}, %zmm0, %zmm0
+; X86-AVX512VL-NEXT: vpandq {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %zmm0, %zmm0
 ; X86-AVX512VL-NEXT: retl
 ;
 ; X86-AVX512VLDQ-LABEL: fabs_v8f64:
 ; X86-AVX512VLDQ: # %bb.0:
-; X86-AVX512VLDQ-NEXT: vandpd {{\.LCPI.*}}{1to8}, %zmm0, %zmm0
+; X86-AVX512VLDQ-NEXT: vandpd {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %zmm0, %zmm0
 ; X86-AVX512VLDQ-NEXT: retl
 ;
 ; X64-AVX-LABEL: fabs_v8f64:
@@ -175,12 +175,12 @@
 ;
 ; X86-AVX512VL-LABEL: fabs_v16f32:
 ; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vpandd {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
+; X86-AVX512VL-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
 ; X86-AVX512VL-NEXT: retl
 ;
 ; X86-AVX512VLDQ-LABEL: fabs_v16f32:
 ; X86-AVX512VLDQ: # %bb.0:
-; X86-AVX512VLDQ-NEXT: vandps {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
+; X86-AVX512VLDQ-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
 ; X86-AVX512VLDQ-NEXT: retl
 ;
 ; X64-AVX-LABEL: fabs_v16f32:
diff --git a/llvm/test/CodeGen/X86/vec_fneg.ll b/llvm/test/CodeGen/X86/vec_fneg.ll
--- a/llvm/test/CodeGen/X86/vec_fneg.ll
+++ b/llvm/test/CodeGen/X86/vec_fneg.ll
@@ -10,7 +10,7 @@
 define <4 x float> @t1(<4 x float> %Q) nounwind {
 ; X32-SSE-LABEL: t1:
 ; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X32-SSE-NEXT: retl
 ;
 ; X64-SSE-LABEL: t1:
@@ -166,7 +166,7 @@
 define <4 x float> @fneg_undef_elts_v4f32(<4 x float> %x) {
 ; X32-SSE-LABEL: fneg_undef_elts_v4f32:
 ; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X32-SSE-NEXT: retl
 ;
 ; X64-SSE-LABEL: fneg_undef_elts_v4f32:
@@ -194,7 +194,7 @@
 define <4 x float> @fneg(<4 x float> %Q) nounwind {
 ; X32-SSE-LABEL: fneg:
 ; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X32-SSE-NEXT: retl
 ;
 ; X64-SSE-LABEL: fneg:
diff --git a/llvm/test/CodeGen/X86/vec_fpext.ll b/llvm/test/CodeGen/X86/vec_fpext.ll
--- a/llvm/test/CodeGen/X86/vec_fpext.ll
+++ b/llvm/test/CodeGen/X86/vec_fpext.ll
@@ -255,42 +255,42 @@
 ; X32-SSE: # %bb.0: # %entry
 ; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [1.0E+0,-2.0E+0]
 ; X32-SSE-NEXT: # encoding: [0x0f,0x28,0x05,A,A,A,A]
-; X32-SSE-NEXT: # fixup A - offset: 3, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X32-SSE-NEXT: # fixup A - offset: 3, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X32-SSE-NEXT: retl # encoding: [0xc3]
 ;
 ; X32-AVX-LABEL: fpext_fromconst:
 ; X32-AVX: # %bb.0: # %entry
 ; X32-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1.0E+0,-2.0E+0]
 ; X32-AVX-NEXT: # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
-; X32-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X32-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X32-AVX-NEXT: retl # encoding: [0xc3]
 ;
 ; X32-AVX512VL-LABEL: fpext_fromconst:
 ; X32-AVX512VL: # %bb.0: # %entry
-; X32-AVX512VL-NEXT: vmovaps {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [1.0E+0,-2.0E+0]
+; X32-AVX512VL-NEXT: vmovaps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [1.0E+0,-2.0E+0]
 ; X32-AVX512VL-NEXT: # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
-; X32-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X32-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
 ; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
 ;
 ; X64-SSE-LABEL: fpext_fromconst:
 ; X64-SSE: # %bb.0: # %entry
 ; X64-SSE-NEXT: movaps {{.*#+}} xmm0 = [1.0E+0,-2.0E+0]
 ; X64-SSE-NEXT: # encoding: [0x0f,0x28,0x05,A,A,A,A]
-; X64-SSE-NEXT: # fixup A - offset: 3, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-SSE-NEXT: # fixup A - offset: 3, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-SSE-NEXT: retq # encoding: [0xc3]
 ;
 ; X64-AVX-LABEL: fpext_fromconst:
 ; X64-AVX: # %bb.0: # %entry
 ; X64-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1.0E+0,-2.0E+0]
 ; X64-AVX-NEXT: # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX-NEXT: retq # encoding: [0xc3]
 ;
 ; X64-AVX512VL-LABEL: fpext_fromconst:
 ; X64-AVX512VL: # %bb.0: # %entry
 ; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [1.0E+0,-2.0E+0]
 ; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
 entry:
 %0 = insertelement <2 x float> undef, float 1.0, i32 0
diff --git a/llvm/test/CodeGen/X86/vec_fptrunc.ll b/llvm/test/CodeGen/X86/vec_fptrunc.ll
--- a/llvm/test/CodeGen/X86/vec_fptrunc.ll
+++ b/llvm/test/CodeGen/X86/vec_fptrunc.ll
@@ -186,14 +186,14 @@
 define <4 x float> @fptrunc_fromconst() {
 ; X32-SSE-LABEL: fptrunc_fromconst:
 ; X32-SSE: # %bb.0: # %entry
-; X32-SSE-NEXT: cvtpd2ps {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT: cvtpd2ps {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: cvtpd2ps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X32-SSE-NEXT: cvtpd2ps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X32-SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; X32-SSE-NEXT: retl
 ;
 ; X32-AVX-LABEL: fptrunc_fromconst:
 ; X32-AVX: # %bb.0: # %entry
-; X32-AVX-NEXT: vcvtpd2psy {{\.LCPI.*}}, %xmm0
+; X32-AVX-NEXT: vcvtpd2psy {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X32-AVX-NEXT: retl
 ;
 ; X64-SSE-LABEL: fptrunc_fromconst:
diff --git a/llvm/test/CodeGen/X86/vec_logical.ll b/llvm/test/CodeGen/X86/vec_logical.ll
--- a/llvm/test/CodeGen/X86/vec_logical.ll
+++ b/llvm/test/CodeGen/X86/vec_logical.ll
@@ -5,13 +5,13 @@
 define void @t(<4 x float> %A) {
 ; SSE-LABEL: t:
 ; SSE: # %bb.0:
-; SSE-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; SSE-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; SSE-NEXT: movaps %xmm0, 0
 ; SSE-NEXT: retl
 ;
 ; AVX-LABEL: t:
 ; AVX: # %bb.0:
-; AVX-NEXT: vxorps {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX-NEXT: vxorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; AVX-NEXT: vmovaps %xmm0, 0
 ; AVX-NEXT: retl
 %tmp1277 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %A
diff --git a/llvm/test/CodeGen/X86/vec_partial.ll b/llvm/test/CodeGen/X86/vec_partial.ll
--- a/llvm/test/CodeGen/X86/vec_partial.ll
+++ b/llvm/test/CodeGen/X86/vec_partial.ll
@@ -6,7 +6,7 @@
 define <3 x float> @addf3(<3 x float> %x) {
 ; X86-LABEL: addf3:
 ; X86: # %bb.0: # %entry
-; X86-NEXT: addps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: addps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: addf3:
diff --git a/llvm/test/CodeGen/X86/vec_reassociate.ll b/llvm/test/CodeGen/X86/vec_reassociate.ll
--- a/llvm/test/CodeGen/X86/vec_reassociate.ll
+++ b/llvm/test/CodeGen/X86/vec_reassociate.ll
@@ -38,7 +38,7 @@
 ; X86-LABEL: mul_4i32:
 ; X86: # %bb.0:
 ; X86-NEXT: pmulld %xmm1, %xmm0
-; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: mul_4i32:
@@ -56,7 +56,7 @@
 ; X86-LABEL: mul_4i32_commute:
 ; X86: # %bb.0:
 ; X86-NEXT: pmulld %xmm1, %xmm0
-; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: mul_4i32_commute:
@@ -74,7 +74,7 @@
 ; X86-LABEL: and_4i32:
 ; X86: # %bb.0:
 ; X86-NEXT: andps %xmm1, %xmm0
-; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: and_4i32:
@@ -92,7 +92,7 @@
 ; X86-LABEL: and_4i32_commute:
 ; X86: # %bb.0:
 ; X86-NEXT: andps %xmm1, %xmm0
-; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: and_4i32_commute:
@@ -110,7 +110,7 @@
 ; X86-LABEL: or_4i32:
 ; X86: # %bb.0:
 ; X86-NEXT: orps %xmm1, %xmm0
-; X86-NEXT: orps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: orps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: or_4i32:
@@ -128,7 +128,7 @@
 ; X86-LABEL: or_4i32_commute:
 ; X86: # %bb.0:
 ; X86-NEXT: orps %xmm1, %xmm0
-; X86-NEXT: orps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: orps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: or_4i32_commute:
@@ -146,7 +146,7 @@
 ; X86-LABEL: xor_4i32:
 ; X86: # %bb.0:
 ; X86-NEXT: xorps %xmm1, %xmm0
-; X86-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: xor_4i32:
@@ -164,7 +164,7 @@
 ; X86-LABEL: xor_4i32_commute:
 ; X86: # %bb.0:
 ; X86-NEXT: xorps %xmm1, %xmm0
-; X86-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: xor_4i32_commute:
diff --git a/llvm/test/CodeGen/X86/vec_shift4.ll b/llvm/test/CodeGen/X86/vec_shift4.ll
--- a/llvm/test/CodeGen/X86/vec_shift4.ll
+++ b/llvm/test/CodeGen/X86/vec_shift4.ll
@@ -6,7 +6,7 @@
 ; X86-LABEL: shl1:
 ; X86: # %bb.0: # %entry
 ; X86-NEXT: pslld $23, %xmm1
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-NEXT: cvttps2dq %xmm1, %xmm1
 ; X86-NEXT: pmulld %xmm1, %xmm0
 ; X86-NEXT: retl
@@ -31,12 +31,12 @@
 ; X86-NEXT: psllw $5, %xmm1
 ; X86-NEXT: movdqa %xmm0, %xmm3
 ; X86-NEXT: psllw $4, %xmm3
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm3
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
 ; X86-NEXT: movdqa %xmm1, %xmm0
 ; X86-NEXT: pblendvb %xmm0, %xmm3, %xmm2
 ; X86-NEXT: movdqa %xmm2, %xmm3
 ; X86-NEXT: psllw $2, %xmm3
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm3
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
 ; X86-NEXT: paddb %xmm1, %xmm1
 ; X86-NEXT: movdqa %xmm1, %xmm0
 ; X86-NEXT: pblendvb %xmm0, %xmm3, %xmm2
diff --git a/llvm/test/CodeGen/X86/vector-fshl-128.ll b/llvm/test/CodeGen/X86/vector-fshl-128.ll
--- a/llvm/test/CodeGen/X86/vector-fshl-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-128.ll
@@ -406,7 +406,7 @@
 ; X86-SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm1[0,3]
 ; X86-SSE2-NEXT: pand %xmm4, %xmm2
 ; X86-SSE2-NEXT: pslld $23, %xmm2
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: cvttps2dq %xmm2, %xmm1
 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
@@ -703,7 +703,7 @@
 ; X86-SSE2-NEXT: pandn %xmm3, %xmm5
 ; X86-SSE2-NEXT: psrlw $1, %xmm3
 ; X86-SSE2-NEXT: pand %xmm4, %xmm3
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm1
 ; X86-SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
 ; X86-SSE2-NEXT: pslld $23, %xmm1
@@ -1017,7 +1017,7 @@
 ; X86-SSE2-NEXT: pandn %xmm1, %xmm7
 ; X86-SSE2-NEXT: psrlw $4, %xmm1
 ; X86-SSE2-NEXT: pand %xmm3, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: por %xmm7, %xmm1
 ; X86-SSE2-NEXT: paddb %xmm6, %xmm6
 ; X86-SSE2-NEXT: pxor %xmm3, %xmm3
@@ -1026,7 +1026,7 @@
 ; X86-SSE2-NEXT: pandn %xmm1, %xmm7
 ; X86-SSE2-NEXT: psrlw $2, %xmm1
 ; X86-SSE2-NEXT: pand %xmm3, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: por %xmm7, %xmm1
 ; X86-SSE2-NEXT: paddb %xmm6, %xmm6
 ; X86-SSE2-NEXT: pxor %xmm3, %xmm3
@@ -1045,7 +1045,7 @@
 ; X86-SSE2-NEXT: pandn %xmm0, %xmm4
 ; X86-SSE2-NEXT: psllw $4, %xmm0
 ; X86-SSE2-NEXT: pand %xmm1, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: por %xmm4, %xmm0
 ; X86-SSE2-NEXT: paddb %xmm2, %xmm2
 ; X86-SSE2-NEXT: pxor %xmm1, %xmm1
@@ -1054,7 +1054,7 @@
 ; X86-SSE2-NEXT: pandn %xmm0, %xmm4
 ; X86-SSE2-NEXT: psllw $2, %xmm0
 ; X86-SSE2-NEXT: pand %xmm1, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: por %xmm4, %xmm0
 ; X86-SSE2-NEXT: pxor %xmm1, %xmm1
 ; X86-SSE2-NEXT: paddb %xmm2, %xmm2
@@ -1766,7 +1766,7 @@
 ; X86-SSE2-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
 ; X86-SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; X86-SSE2-NEXT: psrlw $1, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: psrlw %xmm2, %xmm1
 ; X86-SSE2-NEXT: psrlw %xmm2, %xmm5
 ; X86-SSE2-NEXT: psrlw $8, %xmm5
@@ -2407,9 +2407,9 @@
 ;
 ; X86-SSE2-LABEL: constant_funnnel_v8i16:
 ; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: psrlw $1, %xmm1
-; X86-SSE2-NEXT: pmulhuw {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pmulhuw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: por %xmm1, %xmm0
 ; X86-SSE2-NEXT: retl
 %res = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> )
@@ -2603,22 +2603,22 @@
 ; X86-SSE2: # %bb.0:
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
 ; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
 ; X86-SSE2-NEXT: pand %xmm3, %xmm2
 ; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: pand %xmm3, %xmm0
 ; X86-SSE2-NEXT: packuswb %xmm2, %xmm0
 ; X86-SSE2-NEXT: psrlw $1, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm2
 ; X86-SSE2-NEXT: movdqa %xmm1, %xmm3
 ; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm3
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
 ; X86-SSE2-NEXT: psrlw $8, %xmm3
 ; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: psrlw $8, %xmm1
 ; X86-SSE2-NEXT: packuswb %xmm3, %xmm1
 ; X86-SSE2-NEXT: por %xmm1, %xmm0
@@ -2932,9 +2932,9 @@
 ; X86-SSE2-LABEL: splatconstant_funnnel_v16i8:
 ; X86-SSE2: # %bb.0:
 ; X86-SSE2-NEXT: psrlw $4, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: psllw $4, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: por %xmm1, %xmm0
 ; X86-SSE2-NEXT: retl
 %res = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> )
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
@@ -282,9 +282,9 @@
 ;
 ; X86-SSE2-LABEL: var_funnnel_v4i32:
 ; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: pslld $23, %xmm1
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
@@ -458,7 +458,7 @@
 ;
 ; X86-SSE2-LABEL: var_funnnel_v8i16:
 ; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: movdqa %xmm1, %xmm2
 ; X86-SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
 ; X86-SSE2-NEXT: pslld $23, %xmm2
@@ -706,20 +706,20 @@
 ; X86-SSE2-NEXT: pcmpgtb %xmm1, %xmm3
 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm4
 ; X86-SSE2-NEXT: psrlw $4, %xmm4
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm5
 ; X86-SSE2-NEXT: psllw $4, %xmm5
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm5
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm5
 ; X86-SSE2-NEXT: por %xmm4, %xmm5
 ; X86-SSE2-NEXT: pand %xmm3, %xmm5
 ; X86-SSE2-NEXT: pandn %xmm2, %xmm3
 ; X86-SSE2-NEXT: por %xmm5, %xmm3
 ; X86-SSE2-NEXT: movdqa %xmm3, %xmm2
 ; X86-SSE2-NEXT: psrlw $6, %xmm2
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: movdqa %xmm3, %xmm4
 ; X86-SSE2-NEXT: psllw $2, %xmm4
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
 ; X86-SSE2-NEXT: por %xmm2, %xmm4
 ; X86-SSE2-NEXT: paddb %xmm1, %xmm1
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm2
@@ -731,7 +731,7 @@
 ; X86-SSE2-NEXT: paddb %xmm2, %xmm3
 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm4
 ; X86-SSE2-NEXT: psrlw $7, %xmm4
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
 ; X86-SSE2-NEXT: por %xmm3, %xmm4
 ; X86-SSE2-NEXT: paddb %xmm1, %xmm1
 ; X86-SSE2-NEXT: pcmpgtb %xmm1, %xmm0
@@ -1103,7 +1103,7 @@
 ;
 ; X86-SSE2-LABEL: splatvar_funnnel_v8i16:
 ; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,0,0]
 ; X86-SSE2-NEXT: pand %xmm1, %xmm2
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -1333,7 +1333,7 @@
 ;
 ; X86-SSE2-LABEL: splatvar_funnnel_v16i8:
 ; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
 ; X86-SSE2-NEXT: psubb %xmm1, %xmm2
 ; X86-SSE2-NEXT: movdqa %xmm1, %xmm3
@@ -1848,20 +1848,20 @@
 ; X86-SSE2-NEXT: pxor %xmm1, %xmm1
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
 ; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: psrlw $8, %xmm2
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
 ; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm3
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
 ; X86-SSE2-NEXT: psrlw $8, %xmm3
 ; X86-SSE2-NEXT: packuswb %xmm2, %xmm3
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
 ; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
 ; X86-SSE2-NEXT: pand %xmm2, %xmm1
 ; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: pand %xmm2, %xmm0
 ; X86-SSE2-NEXT: packuswb %xmm1, %xmm0
 ; X86-SSE2-NEXT: por %xmm3, %xmm0
@@ -2168,9 +2168,9 @@
 ; X86-SSE2: # %bb.0:
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
 ; X86-SSE2-NEXT: psrlw $4, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: psllw $4, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: por %xmm1, %xmm0
 ; X86-SSE2-NEXT: retl
 %res = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %x, <16 x i8> %x, <16 x i8> )
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
@@ -135,9 +135,9 @@
 ;
 ; X86-SSE2-LABEL: var_funnnel_v2i32:
 ; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: pslld $23, %xmm1
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
@@ -290,9 +290,9 @@
 ; X86-SSE2-LABEL: splatvar_funnnel_v2i32:
 ; X86-SSE2: # %bb.0:
 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: pslld $23, %xmm1
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-fshr-128.ll b/llvm/test/CodeGen/X86/vector-fshr-128.ll
--- a/llvm/test/CodeGen/X86/vector-fshr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-128.ll
@@ -407,7 +407,7 @@
 ; X86-SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm1[0,3]
 ; X86-SSE2-NEXT: pandn %xmm4, %xmm2
 ; X86-SSE2-NEXT: pslld $23, %xmm2
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: cvttps2dq %xmm2, %xmm1
 ; X86-SSE2-NEXT: pslld $1, %xmm0
 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
@@ -702,7 +702,7 @@
 ; X86-SSE2-NEXT: pandn %xmm1, %xmm4
 ; X86-SSE2-NEXT: psrlw $1, %xmm1
 ; X86-SSE2-NEXT: pand %xmm3, %xmm1
-; X86-SSE2-NEXT: pandn {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pandn {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm3
 ; X86-SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
 ; X86-SSE2-NEXT: pslld $23, %xmm3
@@ -1005,7 +1005,7 @@
 ; X86-SSE2-NEXT: pandn %xmm0, %xmm7
 ; X86-SSE2-NEXT: psllw $4, %xmm0
 ; X86-SSE2-NEXT: pand %xmm6, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: por %xmm7, %xmm0
 ; X86-SSE2-NEXT: paddb %xmm4, %xmm4
 ; X86-SSE2-NEXT: pxor %xmm6, %xmm6
@@ -1014,7 +1014,7 @@
 ; X86-SSE2-NEXT: pandn %xmm0, %xmm7
 ; X86-SSE2-NEXT: psllw $2, %xmm0
 ; X86-SSE2-NEXT: pand %xmm6, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: por %xmm7, %xmm0
 ; X86-SSE2-NEXT: paddb %xmm4, %xmm4
 ; X86-SSE2-NEXT: pxor %xmm6, %xmm6
@@ -1031,7 +1031,7 @@
 ; X86-SSE2-NEXT: pandn %xmm1, %xmm6
 ; X86-SSE2-NEXT: psrlw $4, %xmm1
 ; X86-SSE2-NEXT: pand %xmm5, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: por %xmm6, %xmm1
 ; X86-SSE2-NEXT: paddb %xmm2, %xmm2
 ; X86-SSE2-NEXT: pxor %xmm5, %xmm5
@@ -1040,7 +1040,7 @@
 ; X86-SSE2-NEXT: pandn %xmm1, %xmm6
 ; X86-SSE2-NEXT: psrlw $2, %xmm1
 ; X86-SSE2-NEXT: pand %xmm5, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: por %xmm6, %xmm1
 ; X86-SSE2-NEXT: paddb %xmm2, %xmm2
 ; X86-SSE2-NEXT: pcmpgtb %xmm2, %xmm3
@@ -1048,7 +1048,7 @@
 ; X86-SSE2-NEXT: pandn %xmm1, %xmm2
 ; X86-SSE2-NEXT: psrlw $1, %xmm1
 ; X86-SSE2-NEXT: pand %xmm3, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: por %xmm2, %xmm1
 ; X86-SSE2-NEXT: por %xmm4, %xmm1
 ; X86-SSE2-NEXT: por %xmm1, %xmm0
@@ -2133,10 +2133,10 @@
 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,65535,65535]
 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm3
 ; X86-SSE2-NEXT: pandn %xmm1, %xmm3
-; X86-SSE2-NEXT: pmulhuw {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pmulhuw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: pand %xmm1, %xmm2
 ; X86-SSE2-NEXT: psllw $1, %xmm0
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: por %xmm3, %xmm0
 ; X86-SSE2-NEXT: por %xmm2, %xmm0
 ; X86-SSE2-NEXT: retl
@@ -2321,20 +2321,20 @@
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm2
 ; X86-SSE2-NEXT: movdqa %xmm1, %xmm3
 ; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm3
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
 ; X86-SSE2-NEXT: psrlw $8, %xmm3
 ; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: psrlw $8, %xmm1
 ; X86-SSE2-NEXT: packuswb %xmm3, %xmm1
 ; X86-SSE2-NEXT: paddb %xmm0, %xmm0
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
 ; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
 ; X86-SSE2-NEXT: pand %xmm3, %xmm2
 ; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: pand %xmm3, %xmm0
 ; X86-SSE2-NEXT: packuswb %xmm2, %xmm0
 ; X86-SSE2-NEXT: por %xmm1, %xmm0
@@ -2648,9 +2648,9 @@
 ; X86-SSE2-LABEL: splatconstant_funnnel_v16i8:
 ; X86-SSE2: # %bb.0:
 ; X86-SSE2-NEXT: psrlw $4, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: psllw $4, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: por %xmm1, %xmm0
 ; X86-SSE2-NEXT: retl
 %res = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> )
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
@@ -296,9 +296,9 @@
 ; X86-SSE2: # %bb.0:
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm2
 ; X86-SSE2-NEXT: psubd %xmm1, %xmm2
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: pslld $23, %xmm2
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: cvttps2dq %xmm2, %xmm1
 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
@@ -492,7 +492,7 @@
 ; X86-SSE2: # %bb.0:
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm2
 ; X86-SSE2-NEXT: psubw %xmm1, %xmm2
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm1
 ; X86-SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
 ; X86-SSE2-NEXT: pslld $23, %xmm1
@@ -746,20 +746,20 @@
 ; X86-SSE2-NEXT: pcmpgtb %xmm3, %xmm1
 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm4
 ; X86-SSE2-NEXT: psrlw $4, %xmm4
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm5
 ; X86-SSE2-NEXT: psllw $4, %xmm5
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm5
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm5
 ; X86-SSE2-NEXT: por %xmm4, %xmm5
 ; X86-SSE2-NEXT: pand %xmm1, %xmm5
 ; X86-SSE2-NEXT: pandn %xmm2, %xmm1
 ; X86-SSE2-NEXT: por %xmm5, %xmm1
 ; X86-SSE2-NEXT: movdqa %xmm1, %xmm2
 ; X86-SSE2-NEXT: psrlw $6, %xmm2
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: movdqa %xmm1, %xmm4
 ; X86-SSE2-NEXT: psllw $2, %xmm4
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
 ; X86-SSE2-NEXT: por %xmm2, %xmm4
 ; X86-SSE2-NEXT: paddb %xmm3, %xmm3
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm2
@@ -771,7 +771,7 @@
 ; X86-SSE2-NEXT: paddb %xmm2, %xmm1
 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm4
 ; X86-SSE2-NEXT: psrlw $7, %xmm4
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
 ; X86-SSE2-NEXT: por %xmm1, %xmm4
 ; X86-SSE2-NEXT: paddb %xmm3, %xmm3
 ; X86-SSE2-NEXT: pcmpgtb %xmm3, %xmm0
@@ -1179,7 +1179,7 @@
 ; X86-SSE2: # %bb.0:
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm2
 ; X86-SSE2-NEXT: psubw %xmm1, %xmm2
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,0,0]
 ; X86-SSE2-NEXT: pand %xmm2, %xmm1
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -1420,7 +1420,7 @@
 ; X86-SSE2: # %bb.0:
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm2
 ; X86-SSE2-NEXT: psubb %xmm1, %xmm2
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
 ; X86-SSE2-NEXT: psubb %xmm2, %xmm3
 ; X86-SSE2-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
@@ -1934,20 +1934,20 @@
 ; X86-SSE2-NEXT: pxor %xmm1, %xmm1
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
 ; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: psrlw $8, %xmm2
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
 ; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm3
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
 ; X86-SSE2-NEXT: psrlw $8, %xmm3
 ; X86-SSE2-NEXT: packuswb %xmm2, %xmm3
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
 ; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
 ; X86-SSE2-NEXT: pand %xmm2, %xmm1
 ; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: pand %xmm2, %xmm0
 ; X86-SSE2-NEXT: packuswb %xmm1, %xmm0
 ; X86-SSE2-NEXT: por %xmm3, %xmm0
@@ -2254,9 +2254,9 @@
 ; X86-SSE2: # %bb.0:
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
 ; X86-SSE2-NEXT: psrlw $4, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: psllw $4, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: por %xmm1, %xmm0
 ; X86-SSE2-NEXT: retl
 %res = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %x, <16 x i8> %x, <16 x i8> )
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
@@ -147,9 +147,9 @@
 ; X86-SSE2: # %bb.0:
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm2
 ; X86-SSE2-NEXT: psubd %xmm1, %xmm2
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: pslld $23, %xmm2
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: cvttps2dq %xmm2, %xmm1
 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
@@ -316,9 +316,9 @@
 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm2
 ; X86-SSE2-NEXT: psubd %xmm1, %xmm2
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: pslld $23, %xmm2
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: cvttps2dq %xmm2, %xmm1
 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-gep.ll b/llvm/test/CodeGen/X86/vector-gep.ll
--- a/llvm/test/CodeGen/X86/vector-gep.ll
+++ b/llvm/test/CodeGen/X86/vector-gep.ll
@@ -6,7 +6,7 @@
 ; CHECK-LABEL: AGEP0:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm0
-; CHECK-NEXT: vpaddd {{\.LCPI.*}}, %xmm0, %xmm0
+; CHECK-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; CHECK-NEXT: retl
 %vecinit.i = insertelement <4 x i32*> undef, i32* %ptr, i32 0
 %vecinit2.i = insertelement <4 x i32*> %vecinit.i, i32* %ptr, i32 1
diff --git a/llvm/test/CodeGen/X86/vector-idiv-v2i32.ll b/llvm/test/CodeGen/X86/vector-idiv-v2i32.ll
--- a/llvm/test/CodeGen/X86/vector-idiv-v2i32.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-v2i32.ll
@@ -259,7 +259,7 @@
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: movlps %xmm0, (%eax)
 ; X86-NEXT: retl
 %a = load <2 x i32>, <2 x i32>* %x
diff --git a/llvm/test/CodeGen/X86/vector-lzcnt-128.ll b/llvm/test/CodeGen/X86/vector-lzcnt-128.ll
--- a/llvm/test/CodeGen/X86/vector-lzcnt-128.ll
+++ b/llvm/test/CodeGen/X86/vector-lzcnt-128.ll
@@ -236,7 +236,7 @@
 ; X32-SSE-NEXT: pshufb %xmm0, %xmm3
 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
 ; X32-SSE-NEXT: psrlw $4, %xmm1
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X32-SSE-NEXT: pxor %xmm4, %xmm4
 ; X32-SSE-NEXT: pshufb %xmm1, %xmm2
 ; X32-SSE-NEXT: pcmpeqb %xmm4, %xmm1
@@ -489,7 +489,7 @@
 ; X32-SSE-NEXT: pshufb %xmm0, %xmm3
 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
 ; X32-SSE-NEXT: psrlw $4, %xmm1
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X32-SSE-NEXT: pxor %xmm4, %xmm4
 ; X32-SSE-NEXT: pshufb %xmm1, %xmm2
 ; X32-SSE-NEXT: pcmpeqb %xmm4, %xmm1
@@ -724,7 +724,7 @@
 ; X32-SSE-NEXT: pshufb %xmm0, %xmm3
 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
 ; X32-SSE-NEXT: psrlw $4, %xmm1
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X32-SSE-NEXT: pxor %xmm4, %xmm4
 ; X32-SSE-NEXT: pshufb %xmm1, %xmm2
 ; X32-SSE-NEXT: pcmpeqb %xmm4, %xmm1
@@ -953,7 +953,7 @@
 ; X32-SSE-NEXT: pshufb %xmm0, %xmm3
 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
 ; X32-SSE-NEXT: psrlw $4, %xmm1
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X32-SSE-NEXT: pxor %xmm4, %xmm4
 ; X32-SSE-NEXT: pshufb %xmm1, %xmm2
 ; X32-SSE-NEXT: pcmpeqb %xmm4, %xmm1
@@ -1153,7 +1153,7 @@
 ; X32-SSE-NEXT: pshufb %xmm0, %xmm3
 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
 ; X32-SSE-NEXT: psrlw $4, %xmm1
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X32-SSE-NEXT: pxor %xmm4, %xmm4
 ; X32-SSE-NEXT: pshufb %xmm1, %xmm2
 ; X32-SSE-NEXT: pcmpeqb %xmm4, %xmm1
@@ -1346,7 +1346,7 @@
 ; X32-SSE-NEXT: pshufb %xmm0, %xmm3
 ; X32-SSE-NEXT: movdqa %xmm0, %xmm1
 ; X32-SSE-NEXT: psrlw $4, %xmm1
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X32-SSE-NEXT: pxor %xmm4, %xmm4
 ; X32-SSE-NEXT: pshufb %xmm1, %xmm2
 ; X32-SSE-NEXT: pcmpeqb %xmm4, %xmm1
@@ -1501,7 +1501,7 @@
 ; X32-SSE-NEXT: movdqa %xmm1, %xmm2
 ; X32-SSE-NEXT: pshufb %xmm0, %xmm2
 ; X32-SSE-NEXT: psrlw $4, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X32-SSE-NEXT: pxor %xmm3, %xmm3
 ; X32-SSE-NEXT: pcmpeqb %xmm0, %xmm3
 ; X32-SSE-NEXT: pand %xmm2, %xmm3
@@ -1651,7 +1651,7 @@
 ; X32-SSE-NEXT: movdqa %xmm1, %xmm2
 ; X32-SSE-NEXT: pshufb %xmm0, %xmm2
 ; X32-SSE-NEXT: psrlw $4, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X32-SSE-NEXT: pxor %xmm3, %xmm3
 ; X32-SSE-NEXT: pcmpeqb %xmm0, %xmm3
 ; X32-SSE-NEXT: pand %xmm2, %xmm3
diff --git a/llvm/test/CodeGen/X86/vector-lzcnt-256.ll b/llvm/test/CodeGen/X86/vector-lzcnt-256.ll
--- a/llvm/test/CodeGen/X86/vector-lzcnt-256.ll
+++ b/llvm/test/CodeGen/X86/vector-lzcnt-256.ll
@@ -164,7 +164,7 @@
 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
 ; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
 ; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand {{\.LCPI.*}}, %ymm3, %ymm3
+; X32-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
 ; X32-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
 ; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
 ; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
@@ -346,7 +346,7 @@
 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
 ; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
 ; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand {{\.LCPI.*}}, %ymm3, %ymm3
+; X32-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
 ; X32-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
 ; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
 ; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
@@ -503,7 +503,7 @@
 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
 ; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
 ; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand {{\.LCPI.*}}, %ymm3, %ymm3
+; X32-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
 ; X32-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
 ; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
 ; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
@@ -655,7 +655,7 @@
 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
 ; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
 ; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand {{\.LCPI.*}}, %ymm3, %ymm3
+; X32-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
 ; X32-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
 ; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
 ; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
@@ -778,7 +778,7 @@
 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
 ; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
 ; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand {{\.LCPI.*}}, %ymm3, %ymm3
+; X32-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
 ; X32-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
 ; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
 ; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
@@ -895,7 +895,7 @@
 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
 ; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
 ; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand {{\.LCPI.*}}, %ymm3, %ymm3
+; X32-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
 ; X32-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
 ; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
 ; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
@@ -994,7 +994,7 @@
 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
 ; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
 ; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
 ; X32-AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
 ; X32-AVX-NEXT: vpcmpeqb %ymm3, %ymm0, %ymm3
 ; X32-AVX-NEXT: vpand %ymm3, %ymm2, %ymm2
@@ -1088,7 +1088,7 @@
 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
 ; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
 ; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
 ; X32-AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
 ; X32-AVX-NEXT: vpcmpeqb %ymm3, %ymm0, %ymm3
 ; X32-AVX-NEXT: vpand %ymm3, %ymm2, %ymm2
diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll
--- a/llvm/test/CodeGen/X86/vector-mul.ll
+++ b/llvm/test/CodeGen/X86/vector-mul.ll
@@ -55,7 +55,7 @@
 ; X86-SSE-LABEL: mul_v16i8_32:
 ; X86-SSE: # %bb.0:
 ; X86-SSE-NEXT: psllw $5, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT: retl
 ;
 ; X64-SSE-LABEL: mul_v16i8_32:
@@ -118,7 +118,7 @@
 define <4 x i32> @mul_v4i32_1_2_4_8(<4 x i32> %a0) nounwind {
 ; X86-SSE-LABEL: mul_v4i32_1_2_4_8:
 ; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT: retl
 ;
 ; X64-SSE-LABEL: mul_v4i32_1_2_4_8:
@@ -147,7 +147,7 @@
 define <8 x i16> @mul_v8i16_1_2_4_8_16_32_64_128(<8 x i16> %a0) nounwind {
 ; X86-SSE-LABEL: mul_v8i16_1_2_4_8_16_32_64_128:
 ; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT: retl
 ;
 ; X64-SSE-LABEL: mul_v8i16_1_2_4_8_16_32_64_128:
@@ -250,7 +250,7 @@
 define <4 x i32> @mul_v4i32_17(<4 x i32> %a0) nounwind {
 ; X86-SSE-LABEL: mul_v4i32_17:
 ; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT: retl
 ;
 ; X64-SSE-LABEL: mul_v4i32_17:
@@ -280,7 +280,7 @@
 define <8 x i16> @mul_v8i16_17(<8 x i16> %a0) nounwind {
 ; X86-SSE-LABEL: mul_v8i16_17:
 ; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT: retl
 ;
 ; X64-SSE-LABEL: mul_v8i16_17:
@@ -301,7 +301,7 @@
 ; X86-SSE: # %bb.0:
 ; X86-SSE-NEXT: movdqa %xmm0, %xmm1
 ; X86-SSE-NEXT: psllw $4, %xmm1
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE-NEXT: paddb %xmm0, %xmm1
 ; X86-SSE-NEXT: movdqa %xmm1, %xmm0
 ; X86-SSE-NEXT: retl
@@ -523,7 +523,7 @@
 define <4 x i32> @mul_v4i32_neg33(<4 x i32> %a0) nounwind {
 ; X86-SSE-LABEL: mul_v4i32_neg33:
 ; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT: retl
 ;
 ; X64-SSE-LABEL: mul_v4i32_neg33:
@@ -553,7 +553,7 @@
 define <8 x i16> @mul_v8i16_neg9(<8 x i16> %a0) nounwind {
 ; X86-SSE-LABEL: mul_v8i16_neg9:
 ; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT: retl
 ;
 ; X64-SSE-LABEL: mul_v8i16_neg9:
@@ -574,7 +574,7 @@
 ; X86-SSE: # %bb.0:
 ; X86-SSE-NEXT: movdqa %xmm0, %xmm1
 ; X86-SSE-NEXT: psllw $2, %xmm1
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE-NEXT: paddb %xmm0, %xmm1
 ; X86-SSE-NEXT: pxor %xmm0, %xmm0
 ; X86-SSE-NEXT: psubb %xmm1, %xmm0
@@ -845,7 +845,7 @@
 define <4 x i32> @mul_v4i32_5_17_33_65(<4 x i32> %a0) nounwind {
 ; X86-SSE-LABEL: mul_v4i32_5_17_33_65:
 ; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT: retl
 ;
 ; X64-SSE-LABEL: mul_v4i32_5_17_33_65:
@@ -864,7 +864,7 @@
 define <8 x i16> @mul_v8i16_2_3_9_17_33_65_129_257(<8 x i16> %a0) nounwind {
 ; X86-SSE-LABEL: mul_v8i16_2_3_9_17_33_65_129_257:
 ; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT: retl
 ;
 ; X64-SSE-LABEL: mul_v8i16_2_3_9_17_33_65_129_257:
@@ -885,10 +885,10 @@
 ; X86-SSE: # %bb.0:
 ; X86-SSE-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
 ; X86-SSE-NEXT: pand %xmm2, %xmm0
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm1
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE-NEXT: pand %xmm2, %xmm1
 ; X86-SSE-NEXT: packuswb %xmm0, %xmm1
 ; X86-SSE-NEXT: movdqa %xmm1, %xmm0
@@ -974,7 +974,7 @@
 define <4 x i32> @mul_v4i32_7(<4 x i32> %a0) nounwind {
 ; X86-SSE-LABEL: mul_v4i32_7:
 ; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT: retl
 ;
 ; X64-SSE-LABEL: mul_v4i32_7:
@@ -1004,7 +1004,7 @@
 define <8 x i16> @mul_v8i16_7(<8 x i16> %a0) nounwind {
 ; X86-SSE-LABEL: mul_v8i16_7:
 ; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT: retl
 ;
 ; X64-SSE-LABEL: mul_v8i16_7:
@@ -1025,7 +1025,7 @@
 ; X86-SSE: # %bb.0:
 ; X86-SSE-NEXT: movdqa %xmm0, %xmm1
 ; X86-SSE-NEXT: psllw $5, %xmm1
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE-NEXT: psubb %xmm0, %xmm1
 ; X86-SSE-NEXT: movdqa %xmm1, %xmm0
 ; X86-SSE-NEXT: retl
@@ -1097,7 +1097,7 @@
 define <4 x i32> @mul_v4i32_neg63(<4 x i32> %a0) nounwind {
 ; X86-SSE-LABEL: mul_v4i32_neg63:
 ; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT: retl
 ;
 ; X64-SSE-LABEL: mul_v4i32_neg63:
@@ -1127,7 +1127,7 @@
 define <8 x i16> @mul_v8i16_neg31(<8 x i16> %a0) nounwind {
 ; X86-SSE-LABEL: mul_v8i16_neg31:
 ; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT: retl
 ;
 ; X64-SSE-LABEL: mul_v8i16_neg31:
@@ -1148,7 +1148,7 @@
 ; X86-SSE: # %bb.0:
 ; X86-SSE-NEXT: movdqa %xmm0, %xmm1
 ; X86-SSE-NEXT: psllw $4, %xmm1
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE-NEXT: psubb %xmm1, %xmm0
 ; X86-SSE-NEXT: retl
 ;
@@ -1503,7 +1503,7 @@
 define <4 x i32> @mul_v4i32_0_15_31_7(<4 x i32> %a0) nounwind {
 ; X86-SSE-LABEL: mul_v4i32_0_15_31_7:
 ; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT: retl
 ;
 ; X64-SSE-LABEL: mul_v4i32_0_15_31_7:
@@ -1522,7 +1522,7 @@
 define <8 x i16> @mul_v8i16_0_1_7_15_31_63_127_255(<8 x i16> %a0) nounwind {
 ; X86-SSE-LABEL: mul_v8i16_0_1_7_15_31_63_127_255:
 ; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE-NEXT: retl
 ;
 ; X64-SSE-LABEL: mul_v8i16_0_1_7_15_31_63_127_255:
diff --git a/llvm/test/CodeGen/X86/vector-rotate-128.ll b/llvm/test/CodeGen/X86/vector-rotate-128.ll
--- a/llvm/test/CodeGen/X86/vector-rotate-128.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-128.ll
@@ -265,9 +265,9 @@
 ;
 ; X86-SSE2-LABEL: var_rotate_v4i32:
 ; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: pslld $23, %xmm1
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
@@ -444,7 +444,7 @@
 ;
 ; X86-SSE2-LABEL: var_rotate_v8i16:
 ; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: movdqa %xmm1, %xmm2
 ; X86-SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
 ; X86-SSE2-NEXT: pslld $23, %xmm2
@@ -677,20 +677,20 @@
 ; X86-SSE2-NEXT: pcmpgtb %xmm1, %xmm3
 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm4
 ; X86-SSE2-NEXT: psrlw $4, %xmm4
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm5
 ; X86-SSE2-NEXT: psllw $4, %xmm5
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm5
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm5
 ; X86-SSE2-NEXT: por %xmm4, %xmm5
 ; X86-SSE2-NEXT: pand %xmm3, %xmm5
 ; X86-SSE2-NEXT: pandn %xmm2, %xmm3
 ; X86-SSE2-NEXT: por %xmm5, %xmm3
 ; X86-SSE2-NEXT: movdqa %xmm3, %xmm2
 ; X86-SSE2-NEXT: psrlw $6, %xmm2
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: movdqa %xmm3, %xmm4
 ; X86-SSE2-NEXT: psllw $2, %xmm4
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
 ; X86-SSE2-NEXT: por %xmm2, %xmm4
 ; X86-SSE2-NEXT: paddb %xmm1, %xmm1
 ; X86-SSE2-NEXT: pxor %xmm2, %xmm2
@@ -702,7 +702,7 @@
 ; X86-SSE2-NEXT: paddb %xmm2, %xmm3
 ; X86-SSE2-NEXT: movdqa %xmm2, %xmm4
 ; X86-SSE2-NEXT: psrlw $7, %xmm4
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
 ; X86-SSE2-NEXT: por %xmm3, %xmm4
 ; X86-SSE2-NEXT: paddb %xmm1, %xmm1
 ; X86-SSE2-NEXT: pcmpgtb %xmm1, %xmm0
@@ -1070,7 +1070,7 @@
 ;
 ; X86-SSE2-LABEL: splatvar_rotate_v8i16:
 ; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,0,0]
 ; X86-SSE2-NEXT: pand %xmm1, %xmm2
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -1285,7 +1285,7 @@
 ;
 ; X86-SSE2-LABEL: splatvar_rotate_v16i8:
 ; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
 ; X86-SSE2-NEXT: psubb %xmm1, %xmm2
 ; X86-SSE2-NEXT: movdqa %xmm1, %xmm3
@@ -1801,20 +1801,20 @@
 ; X86-SSE2-NEXT: pxor %xmm1, %xmm1
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
 ; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
 ; X86-SSE2-NEXT: psrlw $8, %xmm2
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
 ; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm3
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
 ; X86-SSE2-NEXT: psrlw $8, %xmm3
 ; X86-SSE2-NEXT: packuswb %xmm2, %xmm3
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
 ; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
 ; X86-SSE2-NEXT: pand %xmm2, %xmm1
 ; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: pand %xmm2, %xmm0
 ; X86-SSE2-NEXT: packuswb %xmm1, %xmm0
 ; X86-SSE2-NEXT: por %xmm3, %xmm0
@@ -2127,9 +2127,9 @@
 ; X86-SSE2: # %bb.0:
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
 ; X86-SSE2-NEXT: psrlw $4, %xmm1
-; X86-SSE2-NEXT:
pand {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: psllw $4, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: por %xmm1, %xmm0 ; X86-SSE2-NEXT: retl %shl = shl <16 x i8> %a, @@ -2206,7 +2206,7 @@ ; X86-SSE2-LABEL: splatconstant_rotate_mask_v2i64: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: psrlq $49, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl %shl = shl <2 x i64> %a, %lshr = lshr <2 x i64> %a, @@ -2288,7 +2288,7 @@ ; X86-SSE2-NEXT: psrld $28, %xmm1 ; X86-SSE2-NEXT: pslld $4, %xmm0 ; X86-SSE2-NEXT: por %xmm1, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl %shl = shl <4 x i32> %a, %lshr = lshr <4 x i32> %a, @@ -2372,7 +2372,7 @@ ; X86-SSE2-NEXT: psrlw $11, %xmm1 ; X86-SSE2-NEXT: psllw $5, %xmm0 ; X86-SSE2-NEXT: por %xmm1, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl %shl = shl <8 x i16> %a, %lshr = lshr <8 x i16> %a, @@ -2465,11 +2465,11 @@ ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE2-NEXT: psrlw $4, %xmm1 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE2-NEXT: psllw $4, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: por %xmm1, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl %shl = shl <16 x i8> %a, %lshr = lshr <16 x i8> %a, @@ -2551,7 +2551,7 @@ ; X86-SSE2-NEXT: psrld $11, %xmm1 ; X86-SSE2-NEXT: pslld $11, %xmm0 ; X86-SSE2-NEXT: por %xmm1, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl %t0 = lshr <4 x i32> %x, %t1 = shl <4 x i32> %x, diff --git a/llvm/test/CodeGen/X86/vector-sext.ll b/llvm/test/CodeGen/X86/vector-sext.ll --- a/llvm/test/CodeGen/X86/vector-sext.ll +++ b/llvm/test/CodeGen/X86/vector-sext.ll @@ -3791,7 +3791,7 @@ ; X86-SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0] -; X86-SSE2-NEXT: paddw {{\.LCPI.*}}, %xmm3 +; X86-SSE2-NEXT: paddw {{\.LCPI[0-9]+_[0-9]+}}, %xmm3 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,0,0] ; X86-SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5] ; X86-SSE2-NEXT: psllq $58, %xmm0 @@ -3835,7 +3835,7 @@ ; X86-SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7] ; X86-SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0] -; X86-SSE41-NEXT: paddw {{\.LCPI.*}}, %xmm3 +; X86-SSE41-NEXT: paddw {{\.LCPI[0-9]+_[0-9]+}}, %xmm3 ; X86-SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero ; X86-SSE41-NEXT: psllq $58, %xmm0 ; X86-SSE41-NEXT: movdqa %xmm0, %xmm1 diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll --- a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll @@ -1255,11 +1255,11 @@ ; X86-SSE-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm1 = 
xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] ; X86-SSE-NEXT: psraw $8, %xmm1 -; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm1 +; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE-NEXT: psrlw $8, %xmm1 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; X86-SSE-NEXT: psraw $8, %xmm0 -; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: psrlw $8, %xmm0 ; X86-SSE-NEXT: packuswb %xmm1, %xmm0 ; X86-SSE-NEXT: retl @@ -1447,7 +1447,7 @@ ; X86-SSE-LABEL: splatconstant_shift_v16i8: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: psrlw $3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] ; X86-SSE-NEXT: pxor %xmm1, %xmm0 ; X86-SSE-NEXT: psubb %xmm1, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll --- a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll +++ b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll @@ -1158,7 +1158,7 @@ ; ; X86-AVX2-LABEL: constant_shift_v8i32: ; X86-AVX2: # %bb.0: -; X86-AVX2-NEXT: vpsravd {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-AVX2-NEXT: vpsravd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-AVX2-NEXT: retl %shift = ashr <8 x i32> %a, ret <8 x i32> %shift @@ -1230,18 +1230,18 @@ ; ; X86-AVX1-LABEL: constant_shift_v16i16: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpmulhw {{\.LCPI.*}}, %xmm0, %xmm1 +; X86-AVX1-NEXT: vpmulhw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1 ; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7] ; X86-AVX1-NEXT: vpsraw $1, %xmm0, %xmm2 ; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3,4,5,6,7] ; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; X86-AVX1-NEXT: vpmulhw {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpmulhw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; X86-AVX1-NEXT: retl ; ; X86-AVX2-LABEL: constant_shift_v16i16: ; X86-AVX2: # %bb.0: -; X86-AVX2-NEXT: vpmulhw {{\.LCPI.*}}, %ymm0, %ymm1 +; X86-AVX2-NEXT: vpmulhw {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm1 ; X86-AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm1[1,2,3,4,5,6,7] ; X86-AVX2-NEXT: vpsraw $1, %xmm0, %xmm0 ; X86-AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2,3,4,5,6,7] @@ -1379,11 +1379,11 @@ ; X86-AVX2: # %bb.0: ; X86-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31] ; X86-AVX2-NEXT: vpsraw $8, %ymm1, %ymm1 -; X86-AVX2-NEXT: vpmullw {{\.LCPI.*}}, %ymm1, %ymm1 +; X86-AVX2-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1 ; X86-AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1 ; X86-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23] ; X86-AVX2-NEXT: vpsraw $8, %ymm0, %ymm0 -; X86-AVX2-NEXT: vpmullw {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-AVX2-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0 ; X86-AVX2-NEXT: retl @@ -1651,7 +1651,7 @@ ; X86-AVX2-LABEL: splatconstant_shift_v32i8: ; X86-AVX2: # %bb.0: ; X86-AVX2-NEXT: vpsrlw $3, %ymm0, %ymm0 -; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = 
[16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] ; X86-AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0 diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll --- a/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll @@ -2016,7 +2016,7 @@ ; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; X86-SSE-NEXT: psraw $8, %xmm0 -; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: psrlw $8, %xmm0 ; X86-SSE-NEXT: packuswb %xmm2, %xmm0 ; X86-SSE-NEXT: retl @@ -2104,7 +2104,7 @@ ; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; X86-SSE-NEXT: psraw $8, %xmm0 -; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: psrlw $8, %xmm0 ; X86-SSE-NEXT: packuswb %xmm2, %xmm0 ; X86-SSE-NEXT: retl @@ -2192,7 +2192,7 @@ ; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; X86-SSE-NEXT: psraw $8, %xmm0 -; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: psrlw $8, %xmm0 ; X86-SSE-NEXT: packuswb %xmm2, %xmm0 ; X86-SSE-NEXT: retl @@ -2350,7 +2350,7 @@ ; X86-SSE-LABEL: splatconstant_shift_v8i8: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: psrlw $3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] ; X86-SSE-NEXT: pxor %xmm1, %xmm0 ; X86-SSE-NEXT: psubb %xmm1, %xmm0 @@ -2403,7 +2403,7 @@ ; X86-SSE-LABEL: splatconstant_shift_v4i8: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: psrlw $3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] ; X86-SSE-NEXT: pxor %xmm1, %xmm0 ; X86-SSE-NEXT: psubb %xmm1, %xmm0 @@ -2456,7 +2456,7 @@ ; X86-SSE-LABEL: splatconstant_shift_v2i8: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: psrlw $3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16] ; X86-SSE-NEXT: pxor %xmm1, %xmm0 ; X86-SSE-NEXT: psubb %xmm1, %xmm0 diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll --- a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll @@ -488,7 +488,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm4 ; X86-SSE-NEXT: psrlw $4, %xmm0 ; X86-SSE-NEXT: pand %xmm3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm4, %xmm0 ; X86-SSE-NEXT: paddb %xmm1, %xmm1 ; X86-SSE-NEXT: 
pxor %xmm3, %xmm3 @@ -497,7 +497,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm4 ; X86-SSE-NEXT: psrlw $2, %xmm0 ; X86-SSE-NEXT: pand %xmm3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm4, %xmm0 ; X86-SSE-NEXT: paddb %xmm1, %xmm1 ; X86-SSE-NEXT: pcmpgtb %xmm1, %xmm2 @@ -505,7 +505,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm1 ; X86-SSE-NEXT: psrlw $1, %xmm0 ; X86-SSE-NEXT: pand %xmm2, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm1, %xmm0 ; X86-SSE-NEXT: retl %shift = lshr <16 x i8> %a, %b @@ -972,7 +972,7 @@ ; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535] ; X86-SSE-NEXT: movdqa %xmm1, %xmm2 ; X86-SSE-NEXT: pandn %xmm0, %xmm2 -; X86-SSE-NEXT: pmulhuw {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmulhuw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: pand %xmm1, %xmm0 ; X86-SSE-NEXT: por %xmm2, %xmm0 ; X86-SSE-NEXT: retl @@ -1073,10 +1073,10 @@ ; X86-SSE-NEXT: pxor %xmm1, %xmm1 ; X86-SSE-NEXT: movdqa %xmm0, %xmm2 ; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] -; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm2 +; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm2 ; X86-SSE-NEXT: psrlw $8, %xmm2 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: psrlw $8, %xmm0 ; X86-SSE-NEXT: packuswb %xmm2, %xmm0 ; X86-SSE-NEXT: retl @@ -1223,7 +1223,7 @@ ; X86-SSE-LABEL: splatconstant_shift_v16i8: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: psrlw $3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: retl %shift = lshr <16 x i8> %a, ret <16 x i8> %shift diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll --- a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll +++ b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll @@ -504,14 +504,14 @@ ; X86-AVX2: # %bb.0: ; X86-AVX2-NEXT: vpsllw $5, %ymm1, %ymm1 ; X86-AVX2-NEXT: vpsrlw $4, %ymm0, %ymm2 -; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2 +; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2 ; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpsrlw $2, %ymm0, %ymm2 -; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2 +; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2 ; X86-AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpsrlw $1, %ymm0, %ymm2 -; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2 +; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2 ; X86-AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; X86-AVX2-NEXT: retl @@ -875,7 +875,7 @@ ; ; X86-AVX2-LABEL: constant_shift_v4i64: ; X86-AVX2: # %bb.0: -; X86-AVX2-NEXT: vpsrlvq {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-AVX2-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-AVX2-NEXT: retl %shift = lshr <4 x i64> %a, ret <4 x i64> %shift @@ -948,7 +948,7 @@ ; ; X86-AVX2-LABEL: constant_shift_v8i32: ; X86-AVX2: # %bb.0: -; X86-AVX2-NEXT: vpsrlvd {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-AVX2-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, 
%ymm0 ; X86-AVX2-NEXT: retl %shift = lshr <8 x i32> %a, ret <8 x i32> %shift @@ -1015,16 +1015,16 @@ ; ; X86-AVX1-LABEL: constant_shift_v16i16: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpmulhuw {{\.LCPI.*}}, %xmm0, %xmm1 +; X86-AVX1-NEXT: vpmulhuw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1 ; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7] ; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; X86-AVX1-NEXT: vpmulhuw {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpmulhuw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; X86-AVX1-NEXT: retl ; ; X86-AVX2-LABEL: constant_shift_v16i16: ; X86-AVX2: # %bb.0: -; X86-AVX2-NEXT: vpmulhuw {{\.LCPI.*}}, %ymm0, %ymm1 +; X86-AVX2-NEXT: vpmulhuw {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm1 ; X86-AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7] ; X86-AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7] ; X86-AVX2-NEXT: retl @@ -1151,10 +1151,10 @@ ; X86-AVX2: # %bb.0: ; X86-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; X86-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31] -; X86-AVX2-NEXT: vpmullw {{\.LCPI.*}}, %ymm2, %ymm2 +; X86-AVX2-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2 ; X86-AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2 ; X86-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23] -; X86-AVX2-NEXT: vpmullw {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-AVX2-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0 ; X86-AVX2-NEXT: retl @@ -1384,7 +1384,7 @@ ; X86-AVX2-LABEL: splatconstant_shift_v32i8: ; X86-AVX2: # %bb.0: ; X86-AVX2-NEXT: vpsrlw $3, %ymm0, %ymm0 -; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-AVX2-NEXT: retl %shift = lshr <32 x i8> %a, ret <32 x i8> %shift @@ -1454,7 +1454,7 @@ ; X86-AVX1-NEXT: vpsrlq $36, %xmm1, %xmm1 ; X86-AVX1-NEXT: vpsrlq $36, %xmm0, %xmm0 ; X86-AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2] -; X86-AVX1-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vzeroupper ; X86-AVX1-NEXT: retl ; diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll --- a/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll @@ -595,7 +595,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm4 ; X86-SSE-NEXT: psrlw $4, %xmm0 ; X86-SSE-NEXT: pand %xmm3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm4, %xmm0 ; X86-SSE-NEXT: paddb %xmm1, %xmm1 ; X86-SSE-NEXT: pxor %xmm3, %xmm3 @@ -604,7 +604,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm4 ; X86-SSE-NEXT: psrlw $2, %xmm0 ; X86-SSE-NEXT: pand %xmm3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm4, %xmm0 ; X86-SSE-NEXT: paddb %xmm1, %xmm1 ; X86-SSE-NEXT: 
pcmpgtb %xmm1, %xmm2 @@ -612,7 +612,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm1 ; X86-SSE-NEXT: psrlw $1, %xmm0 ; X86-SSE-NEXT: pand %xmm2, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm1, %xmm0 ; X86-SSE-NEXT: retl %shift = lshr <8 x i8> %a, %b @@ -745,7 +745,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm4 ; X86-SSE-NEXT: psrlw $4, %xmm0 ; X86-SSE-NEXT: pand %xmm3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm4, %xmm0 ; X86-SSE-NEXT: paddb %xmm1, %xmm1 ; X86-SSE-NEXT: pxor %xmm3, %xmm3 @@ -754,7 +754,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm4 ; X86-SSE-NEXT: psrlw $2, %xmm0 ; X86-SSE-NEXT: pand %xmm3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm4, %xmm0 ; X86-SSE-NEXT: paddb %xmm1, %xmm1 ; X86-SSE-NEXT: pcmpgtb %xmm1, %xmm2 @@ -762,7 +762,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm1 ; X86-SSE-NEXT: psrlw $1, %xmm0 ; X86-SSE-NEXT: pand %xmm2, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm1, %xmm0 ; X86-SSE-NEXT: retl %shift = lshr <4 x i8> %a, %b @@ -895,7 +895,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm4 ; X86-SSE-NEXT: psrlw $4, %xmm0 ; X86-SSE-NEXT: pand %xmm3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm4, %xmm0 ; X86-SSE-NEXT: paddb %xmm1, %xmm1 ; X86-SSE-NEXT: pxor %xmm3, %xmm3 @@ -904,7 +904,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm4 ; X86-SSE-NEXT: psrlw $2, %xmm0 ; X86-SSE-NEXT: pand %xmm3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm4, %xmm0 ; X86-SSE-NEXT: paddb %xmm1, %xmm1 ; X86-SSE-NEXT: pcmpgtb %xmm1, %xmm2 @@ -912,7 +912,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm1 ; X86-SSE-NEXT: psrlw $1, %xmm0 ; X86-SSE-NEXT: pand %xmm2, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm1, %xmm0 ; X86-SSE-NEXT: retl %shift = lshr <2 x i8> %a, %b @@ -1543,7 +1543,7 @@ ; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535] ; X86-SSE-NEXT: movdqa %xmm1, %xmm2 ; X86-SSE-NEXT: pandn %xmm0, %xmm2 -; X86-SSE-NEXT: pmulhuw {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmulhuw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: pand %xmm1, %xmm0 ; X86-SSE-NEXT: por %xmm2, %xmm0 ; X86-SSE-NEXT: retl @@ -1713,7 +1713,7 @@ ; X86-SSE-NEXT: movdqa %xmm0, %xmm2 ; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: psrlw $8, %xmm0 ; X86-SSE-NEXT: packuswb %xmm2, %xmm0 ; X86-SSE-NEXT: retl @@ -1809,7 +1809,7 @@ ; X86-SSE-NEXT: movdqa %xmm0, %xmm2 ; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = 
xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: psrlw $8, %xmm0 ; X86-SSE-NEXT: packuswb %xmm2, %xmm0 ; X86-SSE-NEXT: retl @@ -1905,7 +1905,7 @@ ; X86-SSE-NEXT: movdqa %xmm0, %xmm2 ; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: psrlw $8, %xmm0 ; X86-SSE-NEXT: packuswb %xmm2, %xmm0 ; X86-SSE-NEXT: retl @@ -2052,7 +2052,7 @@ ; X86-SSE-LABEL: splatconstant_shift_v8i8: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: psrlw $3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: retl %shift = lshr <8 x i8> %a, ret <8 x i8> %shift @@ -2091,7 +2091,7 @@ ; X86-SSE-LABEL: splatconstant_shift_v4i8: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: psrlw $3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: retl %shift = lshr <4 x i8> %a, ret <4 x i8> %shift @@ -2130,7 +2130,7 @@ ; X86-SSE-LABEL: splatconstant_shift_v2i8: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: psrlw $3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: retl %shift = lshr <2 x i8> %a, ret <2 x i8> %shift diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll --- a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll @@ -140,7 +140,7 @@ ; X86-SSE-LABEL: var_shift_v4i32: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: pslld $23, %xmm1 -; X86-SSE-NEXT: paddd {{\.LCPI.*}}, %xmm1 +; X86-SSE-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE-NEXT: cvttps2dq %xmm1, %xmm1 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; X86-SSE-NEXT: pmuludq %xmm1, %xmm0 @@ -402,7 +402,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm4 ; X86-SSE-NEXT: psllw $4, %xmm0 ; X86-SSE-NEXT: pand %xmm3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm4, %xmm0 ; X86-SSE-NEXT: paddb %xmm1, %xmm1 ; X86-SSE-NEXT: pxor %xmm3, %xmm3 @@ -411,7 +411,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm4 ; X86-SSE-NEXT: psllw $2, %xmm0 ; X86-SSE-NEXT: pand %xmm3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm4, %xmm0 ; X86-SSE-NEXT: paddb %xmm1, %xmm1 ; X86-SSE-NEXT: pcmpgtb %xmm1, %xmm2 @@ -844,7 +844,7 @@ ; ; X86-SSE-LABEL: constant_shift_v8i16: ; X86-SSE: # %bb.0: -; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: retl %shift = shl <8 x i16> %a, ret <8 x i16> %shift @@ -942,11 +942,11 @@ ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: movdqa %xmm0, %xmm1 ; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] -; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm1 +; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255] ; X86-SSE-NEXT: pand 
%xmm2, %xmm1 ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: pand %xmm2, %xmm0 ; X86-SSE-NEXT: packuswb %xmm1, %xmm0 ; X86-SSE-NEXT: retl @@ -1093,7 +1093,7 @@ ; X86-SSE-LABEL: splatconstant_shift_v16i8: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: psllw $3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: retl %shift = shl <16 x i8> %a, ret <16 x i8> %shift diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll --- a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll +++ b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll @@ -435,10 +435,10 @@ ; X86-AVX2: # %bb.0: ; X86-AVX2-NEXT: vpsllw $5, %ymm1, %ymm1 ; X86-AVX2-NEXT: vpsllw $4, %ymm0, %ymm2 -; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2 +; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2 ; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpsllw $2, %ymm0, %ymm2 -; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2 +; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2 ; X86-AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2 @@ -800,7 +800,7 @@ ; ; X86-AVX2-LABEL: constant_shift_v4i64: ; X86-AVX2: # %bb.0: -; X86-AVX2-NEXT: vpsllvq {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-AVX2-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-AVX2-NEXT: retl %shift = shl <4 x i64> %a, ret <4 x i64> %shift @@ -845,15 +845,15 @@ ; ; X86-AVX1-LABEL: constant_shift_v8i32: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm1 +; X86-AVX1-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1 ; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; X86-AVX1-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; X86-AVX1-NEXT: retl ; ; X86-AVX2-LABEL: constant_shift_v8i32: ; X86-AVX2: # %bb.0: -; X86-AVX2-NEXT: vpsllvd {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-AVX2-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-AVX2-NEXT: retl %shift = shl <8 x i32> %a, ret <8 x i32> %shift @@ -911,15 +911,15 @@ ; ; X86-AVX1-LABEL: constant_shift_v16i16: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpmullw {{\.LCPI.*}}, %xmm0, %xmm1 +; X86-AVX1-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1 ; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; X86-AVX1-NEXT: vpmullw {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; X86-AVX1-NEXT: retl ; ; X86-AVX2-LABEL: constant_shift_v16i16: ; X86-AVX2: # %bb.0: -; X86-AVX2-NEXT: vpmullw {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-AVX2-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-AVX2-NEXT: retl %shift = shl <16 x i16> %a, ret <16 x i16> %shift @@ -1055,12 +1055,12 @@ ; X86-AVX2-LABEL: constant_shift_v32i8: ; X86-AVX2: # %bb.0: ; X86-AVX2-NEXT: vpsllw $4, %ymm0, %ymm1 -; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm1, %ymm1 +; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1 ; X86-AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32] ; X86-AVX2-NEXT: # ymm2 = mem[0,1,0,1] ; X86-AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpsllw $2, %ymm0, %ymm1 -; X86-AVX2-NEXT: 
vpand {{\.LCPI.*}}, %ymm1, %ymm1 +; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1 ; X86-AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2 ; X86-AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 ; X86-AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm1 @@ -1293,7 +1293,7 @@ ; X86-AVX2-LABEL: splatconstant_shift_v32i8: ; X86-AVX2: # %bb.0: ; X86-AVX2-NEXT: vpsllw $3, %ymm0, %ymm0 -; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-AVX2-NEXT: retl %shift = shl <32 x i8> %a, ret <32 x i8> %shift diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-sub128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-sub128.ll --- a/llvm/test/CodeGen/X86/vector-shift-shl-sub128.ll +++ b/llvm/test/CodeGen/X86/vector-shift-shl-sub128.ll @@ -76,7 +76,7 @@ ; X86-SSE-LABEL: var_shift_v2i32: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: pslld $23, %xmm1 -; X86-SSE-NEXT: paddd {{\.LCPI.*}}, %xmm1 +; X86-SSE-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 ; X86-SSE-NEXT: cvttps2dq %xmm1, %xmm1 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3] ; X86-SSE-NEXT: pmuludq %xmm1, %xmm0 @@ -465,7 +465,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm4 ; X86-SSE-NEXT: psllw $4, %xmm0 ; X86-SSE-NEXT: pand %xmm3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm4, %xmm0 ; X86-SSE-NEXT: paddb %xmm1, %xmm1 ; X86-SSE-NEXT: pxor %xmm3, %xmm3 @@ -474,7 +474,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm4 ; X86-SSE-NEXT: psllw $2, %xmm0 ; X86-SSE-NEXT: pand %xmm3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm4, %xmm0 ; X86-SSE-NEXT: paddb %xmm1, %xmm1 ; X86-SSE-NEXT: pcmpgtb %xmm1, %xmm2 @@ -609,7 +609,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm4 ; X86-SSE-NEXT: psllw $4, %xmm0 ; X86-SSE-NEXT: pand %xmm3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm4, %xmm0 ; X86-SSE-NEXT: paddb %xmm1, %xmm1 ; X86-SSE-NEXT: pxor %xmm3, %xmm3 @@ -618,7 +618,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm4 ; X86-SSE-NEXT: psllw $2, %xmm0 ; X86-SSE-NEXT: pand %xmm3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm4, %xmm0 ; X86-SSE-NEXT: paddb %xmm1, %xmm1 ; X86-SSE-NEXT: pcmpgtb %xmm1, %xmm2 @@ -753,7 +753,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm4 ; X86-SSE-NEXT: psllw $4, %xmm0 ; X86-SSE-NEXT: pand %xmm3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm4, %xmm0 ; X86-SSE-NEXT: paddb %xmm1, %xmm1 ; X86-SSE-NEXT: pxor %xmm3, %xmm3 @@ -762,7 +762,7 @@ ; X86-SSE-NEXT: pandn %xmm0, %xmm4 ; X86-SSE-NEXT: psllw $2, %xmm0 ; X86-SSE-NEXT: pand %xmm3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: por %xmm4, %xmm0 ; X86-SSE-NEXT: paddb %xmm1, %xmm1 ; X86-SSE-NEXT: pcmpgtb %xmm1, %xmm2 @@ -1369,7 +1369,7 @@ ; ; X86-SSE-LABEL: constant_shift_v4i16: ; X86-SSE: # %bb.0: -; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: retl %shift = shl <4 x i16> %a, ret <4 x i16> %shift @@ -1431,7 +1431,7 @@ ; ; X86-SSE-LABEL: constant_shift_v2i16: ; X86-SSE: # %bb.0: -; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: retl %shift = shl <2 x i16> %a, ret <2 x i16> %shift @@ -1517,8 +1517,8 @@ ; 
X86-SSE-LABEL: constant_shift_v8i8: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: pxor %xmm1, %xmm1 ; X86-SSE-NEXT: packuswb %xmm1, %xmm0 ; X86-SSE-NEXT: retl @@ -1606,8 +1606,8 @@ ; X86-SSE-LABEL: constant_shift_v4i8: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: pxor %xmm1, %xmm1 ; X86-SSE-NEXT: packuswb %xmm1, %xmm0 ; X86-SSE-NEXT: retl @@ -1695,8 +1695,8 @@ ; X86-SSE-LABEL: constant_shift_v2i8: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] -; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: pxor %xmm1, %xmm1 ; X86-SSE-NEXT: packuswb %xmm1, %xmm0 ; X86-SSE-NEXT: retl @@ -1843,7 +1843,7 @@ ; X86-SSE-LABEL: splatconstant_shift_v8i8: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: psllw $3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: retl %shift = shl <8 x i8> %a, ret <8 x i8> %shift @@ -1882,7 +1882,7 @@ ; X86-SSE-LABEL: splatconstant_shift_v4i8: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: psllw $3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: retl %shift = shl <4 x i8> %a, ret <4 x i8> %shift @@ -1921,7 +1921,7 @@ ; X86-SSE-LABEL: splatconstant_shift_v2i8: ; X86-SSE: # %bb.0: ; X86-SSE-NEXT: psllw $3, %xmm0 -; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE-NEXT: retl %shift = shl <2 x i8> %a, ret <2 x i8> %shift diff --git a/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll b/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll --- a/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll @@ -345,7 +345,7 @@ ; ; KNL32-LABEL: test_mm512_mask_blend_epi16: ; KNL32: # %bb.0: # %entry -; KNL32-NEXT: vpternlogd $216, {{\.LCPI.*}}{1to16}, %zmm1, %zmm0 +; KNL32-NEXT: vpternlogd $216, {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm1, %zmm0 ; KNL32-NEXT: retl entry: %0 = shufflevector <32 x i16> %A, <32 x i16> %W, <32 x i32> diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll @@ -77,7 +77,7 @@ ; X86-LABEL: combine_permq_pshufb_as_vextracti128: ; X86: # %bb.0: ; X86-NEXT: vextracti128 $1, %ymm0, %xmm0 -; X86-NEXT: vpaddq {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-NEXT: vpaddq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-NEXT: retl ; ; X64-LABEL: combine_permq_pshufb_as_vextracti128: @@ -97,7 +97,7 @@ ; X86-LABEL: combine_permq_pshufb_as_vmovdqa: ; X86: # %bb.0: ; X86-NEXT: vmovdqa %xmm0, %xmm0 -; X86-NEXT: vpaddq {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-NEXT: vpaddq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-NEXT: retl ; ; X64-LABEL: combine_permq_pshufb_as_vmovdqa: @@ -210,7 +210,7 @@ ; 
X86-LABEL: combine_pshufb_as_vpbroadcastd128: ; X86: # %bb.0: ; X86-NEXT: vpbroadcastd %xmm0, %xmm0 -; X86-NEXT: vpaddb {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-NEXT: vpaddb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-NEXT: retl ; ; X64-LABEL: combine_pshufb_as_vpbroadcastd128: @@ -227,7 +227,7 @@ ; X86-LABEL: combine_permd_as_vpbroadcastd256: ; X86: # %bb.0: ; X86-NEXT: vpbroadcastd %xmm0, %ymm0 -; X86-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-NEXT: retl ; ; X64-LABEL: combine_permd_as_vpbroadcastd256: @@ -254,7 +254,7 @@ ; X86-LABEL: combine_permd_as_vpbroadcastq256: ; X86: # %bb.0: ; X86-NEXT: vpbroadcastq %xmm0, %ymm0 -; X86-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-NEXT: retl ; ; X64-LABEL: combine_permd_as_vpbroadcastq256: @@ -543,7 +543,7 @@ define <32 x i8> @combine_psrlw_pshufb(<16 x i16> %a0) { ; X86-LABEL: combine_psrlw_pshufb: ; X86: # %bb.0: -; X86-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-NEXT: retl ; ; X64-LABEL: combine_psrlw_pshufb: @@ -559,7 +559,7 @@ define <32 x i8> @combine_pslld_pshufb(<8 x i32> %a0) { ; X86-LABEL: combine_pslld_pshufb: ; X86: # %bb.0: -; X86-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0 +; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 ; X86-NEXT: retl ; ; X64-LABEL: combine_pslld_pshufb: diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512f.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512f.ll --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512f.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512f.ll @@ -901,8 +901,8 @@ ; X86: # %bb.0: ; X86-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero ; X86-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],mem[0] -; X86-NEXT: vinsertf128 $1, {{\.LCPI.*}}, %ymm2, %ymm2 -; X86-NEXT: vinsertf64x4 $1, {{\.LCPI.*}}, %zmm2, %zmm2 +; X86-NEXT: vinsertf128 $1, {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2 +; X86-NEXT: vinsertf64x4 $1, {{\.LCPI[0-9]+_[0-9]+}}, %zmm2, %zmm2 ; X86-NEXT: vpermi2pd %zmm1, %zmm0, %zmm2 ; X86-NEXT: vpermpd {{.*#+}} zmm0 = zmm2[2,3,1,1,6,7,5,5] ; X86-NEXT: retl diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-xop.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-xop.ll --- a/llvm/test/CodeGen/X86/vector-shuffle-combining-xop.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-xop.ll @@ -135,7 +135,7 @@ ; X86: # %bb.0: ; X86-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero ; X86-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],mem[0] -; X86-NEXT: vinsertf128 $1, {{\.LCPI.*}}, %ymm2, %ymm2 +; X86-NEXT: vinsertf128 $1, {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2 ; X86-NEXT: vpermil2pd $0, %ymm2, %ymm1, %ymm0, %ymm0 ; X86-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,2,3] ; X86-NEXT: retl @@ -174,7 +174,7 @@ define <16 x i8> @combine_vpperm_identity_bitcast(<16 x i8> %a0, <16 x i8> %a1) { ; X86-LABEL: combine_vpperm_identity_bitcast: ; X86: # %bb.0: -; X86-NEXT: vpaddq {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-NEXT: vpaddq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-NEXT: retl ; ; X64-LABEL: combine_vpperm_identity_bitcast: diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll --- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll @@ -3112,10 +3112,10 @@ ; AVX: # %bb.0: ; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [NaN,NaN,0.0E+0,0.0E+0] ; AVX-NEXT: vmovaps %xmm0, (%rax) -; AVX-NEXT: vaddss 
{{\.LCPI.*}}+{{.*}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vaddss {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm0, %xmm0 ; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0 -; AVX-NEXT: vaddss {{\.LCPI.*}}+{{.*}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vaddss {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm0, %xmm0 ; AVX-NEXT: vmovss %xmm0, (%rax) ; AVX-NEXT: retq store <4 x float> , <4 x float>* undef, align 16 diff --git a/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll b/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll --- a/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll +++ b/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll @@ -17,16 +17,16 @@ define <16 x i8> @test_128_i8_x_16_7_mask_lshr_1(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_lshr_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $1, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_7_mask_lshr_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0 -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_lshr_1: @@ -50,13 +50,13 @@ define <16 x i8> @test_128_i8_x_16_28_mask_lshr_1(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $1, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -78,13 +78,13 @@ define <16 x i8> @test_128_i8_x_16_28_mask_lshr_2(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_2: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $2, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_2: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $2, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -106,16 +106,16 @@ define <16 x i8> @test_128_i8_x_16_28_mask_lshr_3(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_3: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $3, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_3: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $3, %xmm0, %xmm0 -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_3: @@ -138,16 +138,16 @@ define <16 x i8> @test_128_i8_x_16_28_mask_lshr_4(<16 x i8> %a0) { ; 
X86-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_4: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $4, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_4: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0 -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_4: @@ -171,13 +171,13 @@ define <16 x i8> @test_128_i8_x_16_224_mask_lshr_1(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $1, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -199,13 +199,13 @@ define <16 x i8> @test_128_i8_x_16_224_mask_lshr_4(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_4: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $4, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_4: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -228,13 +228,13 @@ ; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_5: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: psrlw $5, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_5: ; X86-AVX: # %bb.0: ; X86-AVX-NEXT: vpsrlw $5, %xmm0, %xmm0 -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_5: @@ -256,13 +256,13 @@ ; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_6: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: psrlw $6, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_6: ; X86-AVX: # %bb.0: ; X86-AVX-NEXT: vpsrlw $6, %xmm0, %xmm0 -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_6: @@ -286,16 +286,16 @@ define <16 x i8> @test_128_i8_x_16_7_mask_ashr_1(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_ashr_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $1, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_7_mask_ashr_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: 
vpsrlw $1, %xmm0, %xmm0 -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_ashr_1: @@ -319,13 +319,13 @@ define <16 x i8> @test_128_i8_x_16_28_mask_ashr_1(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $1, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -347,13 +347,13 @@ define <16 x i8> @test_128_i8_x_16_28_mask_ashr_2(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_2: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $2, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_2: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $2, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -375,16 +375,16 @@ define <16 x i8> @test_128_i8_x_16_28_mask_ashr_3(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_3: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $3, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_3: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $3, %xmm0, %xmm0 -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_3: @@ -407,16 +407,16 @@ define <16 x i8> @test_128_i8_x_16_28_mask_ashr_4(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_4: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $4, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_4: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0 -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_4: @@ -440,7 +440,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_1(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $1, %xmm0 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64] ; X86-SSE2-NEXT: pxor %xmm1, %xmm0 @@ -449,7 +449,7 @@ ; ; X86-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand 
{{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0 ; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64] ; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0 @@ -480,7 +480,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_4(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_4: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $4, %xmm0 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8] ; X86-SSE2-NEXT: pxor %xmm1, %xmm0 @@ -489,7 +489,7 @@ ; ; X86-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_4: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0 ; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8] ; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0 @@ -521,7 +521,7 @@ ; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_5: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: psrlw $5, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4] ; X86-SSE2-NEXT: pxor %xmm1, %xmm0 ; X86-SSE2-NEXT: psubb %xmm1, %xmm0 @@ -530,7 +530,7 @@ ; X86-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_5: ; X86-AVX: # %bb.0: ; X86-AVX-NEXT: vpsrlw $5, %xmm0, %xmm0 -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4] ; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0 @@ -561,7 +561,7 @@ ; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_6: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: psrlw $6, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2] ; X86-SSE2-NEXT: pxor %xmm1, %xmm0 ; X86-SSE2-NEXT: psubb %xmm1, %xmm0 @@ -570,7 +570,7 @@ ; X86-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_6: ; X86-AVX: # %bb.0: ; X86-AVX-NEXT: vpsrlw $6, %xmm0, %xmm0 -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2] ; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0 @@ -603,13 +603,13 @@ define <16 x i8> @test_128_i8_x_16_7_mask_shl_1(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddb %xmm0, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_7_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -631,13 +631,13 @@ define <16 x i8> @test_128_i8_x_16_7_mask_shl_4(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_4: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psllw $4, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_7_mask_shl_4: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, 
%xmm0 ; X86-AVX-NEXT: vpsllw $4, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -660,13 +660,13 @@ ; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_5: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: psllw $5, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_7_mask_shl_5: ; X86-AVX: # %bb.0: ; X86-AVX-NEXT: vpsllw $5, %xmm0, %xmm0 -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_5: @@ -688,13 +688,13 @@ ; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_6: ; X86-SSE2: # %bb.0: ; X86-SSE2-NEXT: psllw $6, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_7_mask_shl_6: ; X86-AVX: # %bb.0: ; X86-AVX-NEXT: vpsllw $6, %xmm0, %xmm0 -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_6: @@ -716,13 +716,13 @@ define <16 x i8> @test_128_i8_x_16_28_mask_shl_1(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddb %xmm0, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -744,13 +744,13 @@ define <16 x i8> @test_128_i8_x_16_28_mask_shl_2(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_2: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psllw $2, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_shl_2: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsllw $2, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -772,13 +772,13 @@ define <16 x i8> @test_128_i8_x_16_28_mask_shl_3(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_3: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psllw $3, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_shl_3: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsllw $3, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -800,16 +800,16 @@ define <16 x i8> @test_128_i8_x_16_28_mask_shl_4(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_4: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psllw $4, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_shl_4: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsllw $4, %xmm0, %xmm0 -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, 
%xmm0 ; X86-AVX-NEXT: retl ; ; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_4: @@ -833,13 +833,13 @@ define <16 x i8> @test_128_i8_x_16_224_mask_shl_1(<16 x i8> %a0) { ; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddb %xmm0, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i8_x_16_224_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -868,13 +868,13 @@ define <8 x i16> @test_128_i16_x_8_127_mask_lshr_1(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_lshr_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $1, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_127_mask_lshr_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -897,13 +897,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_3(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_3: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $3, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_3: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $3, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -925,13 +925,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_4(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_4: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $4, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_4: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -953,13 +953,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_5(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_5: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $5, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_5: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $5, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -981,13 +981,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_6(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_6: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $6, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_6: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $6, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -1010,13 +1010,13 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_lshr_1(<8 x i16> 
%a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_lshr_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $1, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_lshr_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -1038,13 +1038,13 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_lshr_8(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_lshr_8: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $8, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_lshr_8: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $8, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -1117,13 +1117,13 @@ define <8 x i16> @test_128_i16_x_8_127_mask_ashr_1(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_ashr_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $1, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_127_mask_ashr_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -1146,13 +1146,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_3(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_3: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $3, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_3: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $3, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -1174,13 +1174,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_4(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_4: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $4, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_4: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -1202,13 +1202,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_5(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_5: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $5, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_5: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $5, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -1230,13 +1230,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_6(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_6: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand 
{{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlw $6, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_6: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlw $6, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -1259,13 +1259,13 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_1(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_ashr_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psraw $1, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_ashr_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsraw $1, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -1287,13 +1287,13 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_8(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_ashr_8: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psraw $8, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_ashr_8: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsraw $8, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -1366,13 +1366,13 @@ define <8 x i16> @test_128_i16_x_8_127_mask_shl_1(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddw %xmm0, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_127_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -1394,13 +1394,13 @@ define <8 x i16> @test_128_i16_x_8_127_mask_shl_8(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_8: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psllw $8, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_127_mask_shl_8: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsllw $8, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -1471,13 +1471,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_3(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_3: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psllw $3, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_3: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsllw $3, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -1499,13 +1499,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_4(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_4: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psllw $4, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_4: ; X86-AVX: # 
%bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsllw $4, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -1527,13 +1527,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_5(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_5: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psllw $5, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_5: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsllw $5, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -1555,13 +1555,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_6(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_6: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psllw $6, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_6: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsllw $6, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -1584,13 +1584,13 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_shl_1(<8 x i16> %a0) { ; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddw %xmm0, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -1619,13 +1619,13 @@ define <4 x i32> @test_128_i32_x_4_32767_mask_lshr_1(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_lshr_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrld $1, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_lshr_1: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsrld $1, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -1662,13 +1662,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_7(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_7: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrld $7, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_7: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsrld $7, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -1704,13 +1704,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_8(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_8: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrld $8, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_8: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, 
%xmm0 ; X86-AVX1-NEXT: vpsrld $8, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -1746,13 +1746,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_9(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_9: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrld $9, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_9: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsrld $9, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -1788,13 +1788,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_10(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_10: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrld $10, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_10: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsrld $10, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -1831,13 +1831,13 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_lshr_1(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrld $1, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_lshr_1: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsrld $1, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -1873,13 +1873,13 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_lshr_16(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_16: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrld $16, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_lshr_16: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsrld $16, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -1966,13 +1966,13 @@ define <4 x i32> @test_128_i32_x_4_32767_mask_ashr_1(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_ashr_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrld $1, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_ashr_1: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsrld $1, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -2009,13 +2009,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_7(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_7: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrld $7, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_7: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsrld $7, %xmm0, 
%xmm0 ; X86-AVX1-NEXT: retl ; @@ -2051,13 +2051,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_8(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_8: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrld $8, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_8: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsrld $8, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -2093,13 +2093,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_9(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_9: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrld $9, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_9: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsrld $9, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -2135,13 +2135,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_10(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_10: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrld $10, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_10: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsrld $10, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -2178,13 +2178,13 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_1(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrad $1, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_ashr_1: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsrad $1, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -2220,13 +2220,13 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_16(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_16: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrad $16, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_ashr_16: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsrad $16, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -2313,13 +2313,13 @@ define <4 x i32> @test_128_i32_x_4_32767_mask_shl_1(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddd %xmm0, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_1: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ 
-2355,13 +2355,13 @@ define <4 x i32> @test_128_i32_x_4_32767_mask_shl_16(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_16: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pslld $16, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_16: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpslld $16, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -2446,13 +2446,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_7(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_7: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pslld $7, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_7: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpslld $7, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -2488,13 +2488,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_8(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_8: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pslld $8, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_8: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpslld $8, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -2530,13 +2530,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_9(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_9: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pslld $9, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_9: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpslld $9, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -2572,13 +2572,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_10(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_10: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pslld $10, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_10: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpslld $10, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -2615,13 +2615,13 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_shl_1(<4 x i32> %a0) { ; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddd %xmm0, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_shl_1: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -2664,13 +2664,13 @@ define <2 x i64> 
@test_128_i64_x_2_2147483647_mask_lshr_1(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_lshr_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlq $1, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_lshr_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlq $1, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -2693,13 +2693,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_15(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_15: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlq $15, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_15: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlq $15, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -2721,13 +2721,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_16(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_16: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlq $16, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_16: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlq $16, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -2749,13 +2749,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_17(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_17: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlq $17, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_17: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlq $17, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -2777,13 +2777,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_18(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_18: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlq $18, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_18: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlq $18, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -2806,13 +2806,13 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_lshr_1(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlq $1, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlq $1, %xmm0, 
%xmm0 ; X86-AVX-NEXT: retl ; @@ -2834,13 +2834,13 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_lshr_32(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_32: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlq $32, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_32: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsrlq $32, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -2920,13 +2920,13 @@ define <2 x i64> @test_128_i64_x_2_2147483647_mask_ashr_1(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_ashr_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlq $1, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_ashr_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlq $1, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -2949,13 +2949,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_15(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_15: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlq $15, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_15: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlq $15, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -2977,13 +2977,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_16(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_16: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlq $16, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_16: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlq $16, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -3005,13 +3005,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_17(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_17: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlq $17, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_17: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlq $17, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -3033,13 +3033,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_18(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_18: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrlq $18, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_18: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: 
vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpsrlq $18, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -3062,14 +3062,14 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_1(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psrad $1, %xmm0 -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsrad $1, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7] @@ -3113,7 +3113,7 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_32(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_32: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3] ; X86-SSE2-NEXT: psrad $31, %xmm0 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3] @@ -3123,7 +3123,7 @@ ; ; X86-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_32: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsrad $31, %xmm0, %xmm1 ; X86-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] ; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7] @@ -3131,7 +3131,7 @@ ; ; X86-AVX2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_32: ; X86-AVX2: # %bb.0: -; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX2-NEXT: vpsrad $31, %xmm0, %xmm1 ; X86-AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3] ; X86-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] @@ -3284,13 +3284,13 @@ define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_1(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_1: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: paddq %xmm0, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_1: ; X86-AVX: # %bb.0: -; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0 ; X86-AVX-NEXT: retl ; @@ -3312,13 +3312,13 @@ define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_32(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_32: ; X86-SSE2: # %bb.0: -; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 ; X86-SSE2-NEXT: psllq $32, %xmm0 ; X86-SSE2-NEXT: retl ; ; X86-AVX1-LABEL: test_128_i64_x_2_2147483647_mask_shl_32: ; X86-AVX1: # %bb.0: -; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0 +; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 ; X86-AVX1-NEXT: vpsllq $32, %xmm0, %xmm0 ; X86-AVX1-NEXT: retl ; @@ -3396,13 +3396,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_15(<2 x i64> %a0) { ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_15: ; X86-SSE2: # %bb.0: -; 
X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: psllq $15, %xmm0
 ; X86-SSE2-NEXT: retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_15:
 ; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT: vpsllq $15, %xmm0, %xmm0
 ; X86-AVX-NEXT: retl
 ;
@@ -3424,13 +3424,13 @@
 define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_16(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_16:
 ; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: psllq $16, %xmm0
 ; X86-SSE2-NEXT: retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_16:
 ; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT: vpsllq $16, %xmm0, %xmm0
 ; X86-AVX-NEXT: retl
 ;
@@ -3452,13 +3452,13 @@
 define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_17(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_17:
 ; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: psllq $17, %xmm0
 ; X86-SSE2-NEXT: retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_17:
 ; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT: vpsllq $17, %xmm0, %xmm0
 ; X86-AVX-NEXT: retl
 ;
@@ -3480,13 +3480,13 @@
 define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_18(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_18:
 ; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: psllq $18, %xmm0
 ; X86-SSE2-NEXT: retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_18:
 ; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT: vpsllq $18, %xmm0, %xmm0
 ; X86-AVX-NEXT: retl
 ;
@@ -3509,13 +3509,13 @@
 define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_shl_1(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
 ; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT: paddq %xmm0, %xmm0
 ; X86-SSE2-NEXT: retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
 ; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0
 ; X86-AVX-NEXT: retl
 ;
diff --git a/llvm/test/CodeGen/X86/vshift-6.ll b/llvm/test/CodeGen/X86/vshift-6.ll
--- a/llvm/test/CodeGen/X86/vshift-6.ll
+++ b/llvm/test/CodeGen/X86/vshift-6.ll
@@ -42,7 +42,7 @@
 ; X86-NEXT: pxor %xmm0, %xmm0
 ; X86-NEXT: pcmpgtb %xmm1, %xmm0
 ; X86-NEXT: pxor %xmm0, %xmm2
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: por %xmm2, %xmm0
 ; X86-NEXT: paddb %xmm1, %xmm1
 ; X86-NEXT: pxor %xmm2, %xmm2
@@ -51,7 +51,7 @@
 ; X86-NEXT: pandn %xmm0, %xmm4
 ; X86-NEXT: psllw $2, %xmm0
 ; X86-NEXT: pand %xmm2, %xmm0
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: por %xmm4, %xmm0
 ; X86-NEXT: paddb %xmm1, %xmm1
 ; X86-NEXT: pcmpgtb %xmm1, %xmm3
diff --git a/llvm/test/CodeGen/X86/widen_load-2.ll b/llvm/test/CodeGen/X86/widen_load-2.ll
--- a/llvm/test/CodeGen/X86/widen_load-2.ll
+++ b/llvm/test/CodeGen/X86/widen_load-2.ll
@@ -359,7 +359,7 @@
 ; X86-NEXT: movw $257, (%ecx) # imm = 0x101
 ; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X86-NEXT: psrlw $1, %xmm0
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: pextrb $2, %xmm0, 2(%eax)
 ; X86-NEXT: pextrw $0, %xmm0, (%eax)
 ; X86-NEXT: retl $4
diff --git a/llvm/test/CodeGen/X86/x86-shifts.ll b/llvm/test/CodeGen/X86/x86-shifts.ll
--- a/llvm/test/CodeGen/X86/x86-shifts.ll
+++ b/llvm/test/CodeGen/X86/x86-shifts.ll
@@ -131,7 +131,7 @@
 ; X86: # %bb.0: # %entry
 ; X86-NEXT: movdqa {{.*#+}} xmm1 = [2,4,8,64,4,4,4,4]
 ; X86-NEXT: pmullw %xmm0, %xmm1
-; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: pxor %xmm1, %xmm0
 ; X86-NEXT: retl
 ;
@@ -204,7 +204,7 @@
 ; X86-LABEL: shl9:
 ; X86: # %bb.0:
 ; X86-NEXT: psllw $3, %xmm0
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: shl9:
@@ -220,7 +220,7 @@
 ; X86-LABEL: shr9:
 ; X86: # %bb.0:
 ; X86-NEXT: psrlw $3, %xmm0
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: shr9:
@@ -247,7 +247,7 @@
 ; X86-LABEL: sra_v16i8:
 ; X86: # %bb.0:
 ; X86-NEXT: psrlw $3, %xmm0
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
 ; X86-NEXT: pxor %xmm1, %xmm0
 ; X86-NEXT: psubb %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/xop-mask-comments.ll b/llvm/test/CodeGen/X86/xop-mask-comments.ll
--- a/llvm/test/CodeGen/X86/xop-mask-comments.ll
+++ b/llvm/test/CodeGen/X86/xop-mask-comments.ll
@@ -55,7 +55,7 @@
 define <16 x i8> @vpperm_shuffle_general(<16 x i8> %a0, <16 x i8> %a1) {
 ; X86-LABEL: vpperm_shuffle_general:
 ; X86: # %bb.0:
-; X86-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0
+; X86-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: vpperm_shuffle_general:
diff --git a/llvm/test/CodeGen/X86/xor.ll b/llvm/test/CodeGen/X86/xor.ll
--- a/llvm/test/CodeGen/X86/xor.ll
+++ b/llvm/test/CodeGen/X86/xor.ll
@@ -373,7 +373,7 @@
 define <4 x i32> @test10(<4 x i32> %a) nounwind {
 ; X86-LABEL: test10:
 ; X86: # %bb.0:
-; X86-NEXT: andnps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: andnps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LIN-LABEL: test10:
diff --git a/llvm/utils/UpdateTestChecks/asm.py b/llvm/utils/UpdateTestChecks/asm.py
--- a/llvm/utils/UpdateTestChecks/asm.py
+++ b/llvm/utils/UpdateTestChecks/asm.py
@@ -197,7 +197,7 @@
   # Generically match a RIP-relative memory operand.
   asm = SCRUB_X86_RIP_RE.sub(r'{{.*}}(%rip)', asm)
   # Generically match a LCP symbol.
-  asm = SCRUB_X86_LCP_RE.sub(r'{{\.LCPI.*}}', asm)
+  asm = SCRUB_X86_LCP_RE.sub(r'{{\.LCPI[0-9]+_[0-9]+}}', asm)
   if getattr(args, 'extra_scrub', False):
     # Avoid generating different checks for 32- and 64-bit because of 'retl' vs 'retq'.
     asm = SCRUB_X86_RET_RE.sub(r'ret{{[l|q]}}', asm)
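
The asm.py hunk above is the root change; every test hunk before it is regenerated fallout. SCRUB_X86_LCP_RE locates x86 local constant-pool labels (of the form .LCPI<function>_<entry>) in the scrubbed assembly, and the replacement string it substitutes becomes the body of the {{...}} block that update_test_checks emits into each CHECK line. Tightening the replacement from \.LCPI.* to \.LCPI[0-9]+_[0-9]+ means the generated FileCheck pattern can match exactly one such label instead of an arbitrary run of trailing text. A minimal sketch of the substitution follows; the regex definition here is an assumption added for self-containment, and only the two .sub() replacement strings are taken from the patch:

    import re

    # Assumed definition, mirroring the pattern asm.py uses to find x86
    # local constant-pool labels such as ".LCPI3_1".
    SCRUB_X86_LCP_RE = re.compile(r'\.LCPI[0-9]+_[0-9]+')

    line = 'pand .LCPI3_1, %xmm0'

    # Old replacement: inside FileCheck's {{...}} the trailing ".*" is a
    # regex wildcard, so the emitted check is looser than intended.
    old = SCRUB_X86_LCP_RE.sub(r'{{\.LCPI.*}}', line)

    # New replacement: the emitted pattern only matches a single
    # ".LCPI<n>_<m>" label.
    new = SCRUB_X86_LCP_RE.sub(r'{{\.LCPI[0-9]+_[0-9]+}}', line)

    print(old)  # pand {{\.LCPI.*}}, %xmm0
    print(new)  # pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0

FileCheck treats the text inside {{...}} as a regular expression, so the replacement string doubles as the check pattern, which is why the same bracketed form appears verbatim in every updated CHECK line above.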