diff --git a/llvm/test/CodeGen/X86/avx512-cvt.ll b/llvm/test/CodeGen/X86/avx512-cvt.ll
--- a/llvm/test/CodeGen/X86/avx512-cvt.ll
+++ b/llvm/test/CodeGen/X86/avx512-cvt.ll
@@ -1,11 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=NODQ --check-prefix=NOVLDQ --check-prefix=KNL
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=VL --check-prefix=VLDQ --check-prefix=VLBW --check-prefix=SKX
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=NODQ --check-prefix=VL --check-prefix=VLNODQ --check-prefix=VLNOBW --check-prefix=AVX512VL
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=DQNOVL --check-prefix=AVX512DQ
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=NOVL --check-prefix=NODQ --check-prefix=NOVLDQ --check-prefix=AVX512BW
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=avx512vl,avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=VL --check-prefix=VLDQ --check-prefix=VLNOBW --check-prefix=AVX512VLDQ
-; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=avx512vl,avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=NODQ --check-prefix=VL --check-prefix=VLNODQ --check-prefix=VLBW --check-prefix=AVX512VLBW
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=ALL,NOVL,NODQ,NOVLDQ,KNL
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq | FileCheck %s --check-prefixes=ALL,VL,VLDQ,VLBW
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=avx512vl | FileCheck %s --check-prefixes=ALL,NODQ,VL,VLNODQ,VLNOBW
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=avx512dq | FileCheck %s --check-prefixes=ALL,NOVL,DQNOVL
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=avx512bw | FileCheck %s --check-prefixes=ALL,NOVL,NODQ,NOVLDQ,AVX512BW
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=avx512vl,avx512dq | FileCheck %s --check-prefixes=ALL,VL,VLDQ,VLNOBW
+; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=avx512vl,avx512bw | FileCheck %s --check-prefixes=ALL,NODQ,VL,VLNODQ,VLBW
 define <16 x float> @sitof32(<16 x i32> %a) nounwind {
diff --git a/llvm/test/CodeGen/X86/bswap-vector.ll b/llvm/test/CodeGen/X86/bswap-vector.ll
--- a/llvm/test/CodeGen/X86/bswap-vector.ll
+++ b/llvm/test/CodeGen/X86/bswap-vector.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-SSE,CHECK-NOSSSE3,CHECK-SSE-X86,CHECK-NOSSSE3-X86
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-SSE,CHECK-SSE-X64,CHECK-NOSSSE3,CHECK-NOSSSE3-X64
+; RUN: llc < %s -mtriple=i686-unknown-linux-gnu -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-SSE,CHECK-NOSSSE3,CHECK-SSE-X86
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-SSE,CHECK-SSE-X64,CHECK-NOSSSE3
 ; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+ssse3 | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-SSE,CHECK-SSE-X64,CHECK-SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-AVX,CHECK-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK-ALL,CHECK-AVX
 declare <8 x i16> @llvm.bswap.v8i16(<8 x i16>)
 declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>)
diff --git a/llvm/test/CodeGen/X86/extract-bits.ll b/llvm/test/CodeGen/X86/extract-bits.ll
--- a/llvm/test/CodeGen/X86/extract-bits.ll
+++ b/llvm/test/CodeGen/X86/extract-bits.ll
@@ -1,14 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=CHECK,X86,NOBMI,X86-NOBMI
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=CHECK,X86,BMI1,X86-BMI1,BMI1NOTBM,X86-BMI1NOTBM
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=CHECK,X86,BMI1,X86-BMI1,BMI1TBM,X86-BMI1TBM
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=CHECK,X86,BMI1,X86-BMI1,BMI1BMI2,X86-BMI1BMI2,BMI1TBM,X86-BMI1TBM,BMI1TBMBMI2,X86-BMI1TBMBMI2
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=CHECK,X86,BMI1,X86-BMI1,BMI1BMI2,X86-BMI1BMI2,BMI1NOTBMBMI2,X86-BMI1NOTBMBMI2
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=CHECK,X64,NOBMI,X64-NOBMI
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=CHECK,X64,BMI1,X64-BMI1,BMI1NOTBM,X64-BMI1NOTBM
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=CHECK,X64,BMI1,X64-BMI1,BMI1TBM,X64-BMI1TBM
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=CHECK,X64,BMI1,X64-BMI1,BMI1BMI2,X64-BMI1BMI2,BMI1TBM,X64-BMI1TBM,BMI1TBMBMI2,X64-BMI1TBMBMI2
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=CHECK,X64,BMI1,X64-BMI1,BMI1BMI2,X64-BMI1BMI2,BMI1NOTBMBMI2,X64-BMI1NOTBMBMI2
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-NOBMI
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-BMI1NOTBM
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-BMI1TBM
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-BMI1BMI2,X86-BMI1TBM
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X86,X86-BMI1BMI2,X86-BMI1NOTBMBMI2
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-NOBMI
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-BMI1NOTBM
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-BMI1TBM
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-BMI1BMI2,X64-BMI1TBM
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2,+fast-bextr < %s | FileCheck %s --check-prefixes=X64,X64-BMI1BMI2,X64-BMI1NOTBMBMI2
 ; *Please* keep in sync with test/CodeGen/AArch64/extract-bits.ll
diff --git a/llvm/test/CodeGen/X86/extract-lowbits.ll b/llvm/test/CodeGen/X86/extract-lowbits.ll
--- a/llvm/test/CodeGen/X86/extract-lowbits.ll
+++ b/llvm/test/CodeGen/X86/extract-lowbits.ll
@@ -1,14 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,NOBMI,X86-NOBMI
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,BMI1,X86-BMI1,BMI1NOTBM,X86-BMI1NOTBM
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,BMI1,X86-BMI1,BMI1TBM,X86-BMI1TBM
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,BMI1,X86-BMI1,BMI1BMI2,X86-BMI1BMI2,BMI1TBM,X86-BMI1TBM,BMI1TBMBMI2,X86-BMI1TBMBMI2
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,BMI1,X86-BMI1,BMI1BMI2,X86-BMI1BMI2,BMI1NOTBMBMI2,X86-BMI1NOTBMBMI2
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,NOBMI,X64-NOBMI
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,BMI1,X64-BMI1,BMI1NOTBM,X64-BMI1NOTBM
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,BMI1,X64-BMI1,BMI1TBM,X64-BMI1TBM
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,BMI1,X64-BMI1,BMI1BMI2,X64-BMI1BMI2,BMI1TBM,X64-BMI1TBM,BMI1TBMBMI2,X64-BMI1TBMBMI2
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,BMI1,X64-BMI1,BMI1BMI2,X64-BMI1BMI2,BMI1NOTBMBMI2,X64-BMI1NOTBMBMI2
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=X86,X86-NOBMI
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=X86,X86-BMI1NOTBM
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2 < %s | FileCheck %s --check-prefix=X86
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2 < %s | FileCheck %s --check-prefixes=X86,X86-BMI1BMI2
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2 < %s | FileCheck %s --check-prefixes=X86,X86-BMI1BMI2
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=-bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=X64,X64-NOBMI
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,-bmi2 < %s | FileCheck %s --check-prefixes=X64,X64-BMI1NOTBM
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,-bmi2 < %s | FileCheck %s --check-prefixes=X64,X64-BMI1TBM
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,+tbm,+bmi2 < %s | FileCheck %s --check-prefixes=X64,X64-BMI1BMI2,X64-BMI1TBM
+; RUN: llc
-mtriple=x86_64-unknown-linux-gnu -mattr=+bmi,-tbm,+bmi2 < %s | FileCheck %s --check-prefixes=X64,X64-BMI1BMI2,X64-BMI1NOTBMBMI2 ; *Please* keep in sync with test/CodeGen/AArch64/extract-lowbits.ll @@ -868,7 +868,7 @@ ; X86-NOBMI-NEXT: movb {{[0-9]+}}(%esp), %bl ; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-NOBMI-NEXT: movl %esi, (%esp) -; X86-NOBMI-NEXT: calll use32 +; X86-NOBMI-NEXT: calll use32@PLT ; X86-NOBMI-NEXT: movl $1, %eax ; X86-NOBMI-NEXT: movl %ebx, %ecx ; X86-NOBMI-NEXT: shll %cl, %eax @@ -887,7 +887,7 @@ ; X86-BMI1NOTBM-NEXT: movb {{[0-9]+}}(%esp), %bl ; X86-BMI1NOTBM-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-BMI1NOTBM-NEXT: movl %esi, (%esp) -; X86-BMI1NOTBM-NEXT: calll use32 +; X86-BMI1NOTBM-NEXT: calll use32@PLT ; X86-BMI1NOTBM-NEXT: shll $8, %ebx ; X86-BMI1NOTBM-NEXT: bextrl %ebx, %esi, %eax ; X86-BMI1NOTBM-NEXT: addl $4, %esp @@ -903,7 +903,7 @@ ; X86-BMI1BMI2-NEXT: movb {{[0-9]+}}(%esp), %bl ; X86-BMI1BMI2-NEXT: movl {{[0-9]+}}(%esp), %esi ; X86-BMI1BMI2-NEXT: movl %esi, (%esp) -; X86-BMI1BMI2-NEXT: calll use32 +; X86-BMI1BMI2-NEXT: calll use32@PLT ; X86-BMI1BMI2-NEXT: bzhil %ebx, %esi, %eax ; X86-BMI1BMI2-NEXT: addl $4, %esp ; X86-BMI1BMI2-NEXT: popl %esi @@ -917,7 +917,7 @@ ; X64-NOBMI-NEXT: pushq %rax ; X64-NOBMI-NEXT: movl %esi, %ebp ; X64-NOBMI-NEXT: movq %rdi, %rbx -; X64-NOBMI-NEXT: callq use32 +; X64-NOBMI-NEXT: callq use32@PLT ; X64-NOBMI-NEXT: movl $1, %eax ; X64-NOBMI-NEXT: movl %ebp, %ecx ; X64-NOBMI-NEXT: shll %cl, %eax @@ -935,7 +935,7 @@ ; X64-BMI1NOTBM-NEXT: pushq %rax ; X64-BMI1NOTBM-NEXT: movl %esi, %ebx ; X64-BMI1NOTBM-NEXT: movq %rdi, %r14 -; X64-BMI1NOTBM-NEXT: callq use32 +; X64-BMI1NOTBM-NEXT: callq use32@PLT ; X64-BMI1NOTBM-NEXT: shll $8, %ebx ; X64-BMI1NOTBM-NEXT: bextrl %ebx, %r14d, %eax ; X64-BMI1NOTBM-NEXT: addq $8, %rsp @@ -950,7 +950,7 @@ ; X64-BMI1BMI2-NEXT: pushq %rax ; X64-BMI1BMI2-NEXT: movl %esi, %ebp ; X64-BMI1BMI2-NEXT: movq %rdi, %rbx -; X64-BMI1BMI2-NEXT: callq use32 +; X64-BMI1BMI2-NEXT: callq use32@PLT ; X64-BMI1BMI2-NEXT: bzhil %ebp, %ebx, %eax ; X64-BMI1BMI2-NEXT: addq $8, %rsp ; X64-BMI1BMI2-NEXT: popq %rbx @@ -2057,7 +2057,7 @@ ; X86-NOBMI-NEXT: # kill: def $cl killed $cl killed $ecx ; X86-NOBMI-NEXT: shrl %cl, %esi ; X86-NOBMI-NEXT: movl %esi, (%esp) -; X86-NOBMI-NEXT: calll use32 +; X86-NOBMI-NEXT: calll use32@PLT ; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %esi ; X86-NOBMI-NEXT: movl %esi, %eax ; X86-NOBMI-NEXT: addl $8, %esp @@ -2074,7 +2074,7 @@ ; X86-BMI1NOTBM-NEXT: # kill: def $cl killed $cl killed $ecx ; X86-BMI1NOTBM-NEXT: shrl %cl, %esi ; X86-BMI1NOTBM-NEXT: movl %esi, (%esp) -; X86-BMI1NOTBM-NEXT: calll use32 +; X86-BMI1NOTBM-NEXT: calll use32@PLT ; X86-BMI1NOTBM-NEXT: andl {{[0-9]+}}(%esp), %esi ; X86-BMI1NOTBM-NEXT: movl %esi, %eax ; X86-BMI1NOTBM-NEXT: addl $8, %esp @@ -2091,7 +2091,7 @@ ; X86-BMI1BMI2-NEXT: movl $-1, %ecx ; X86-BMI1BMI2-NEXT: shrxl %eax, %ecx, %eax ; X86-BMI1BMI2-NEXT: movl %eax, (%esp) -; X86-BMI1BMI2-NEXT: calll use32 +; X86-BMI1BMI2-NEXT: calll use32@PLT ; X86-BMI1BMI2-NEXT: bzhil %ebx, {{[0-9]+}}(%esp), %eax ; X86-BMI1BMI2-NEXT: addl $8, %esp ; X86-BMI1BMI2-NEXT: popl %ebx @@ -2109,7 +2109,7 @@ ; X64-NOBMI-NEXT: # kill: def $cl killed $cl killed $ecx ; X64-NOBMI-NEXT: shrl %cl, %ebp ; X64-NOBMI-NEXT: movl %ebp, %edi -; X64-NOBMI-NEXT: callq use32 +; X64-NOBMI-NEXT: callq use32@PLT ; X64-NOBMI-NEXT: andl %ebx, %ebp ; X64-NOBMI-NEXT: movl %ebp, %eax ; X64-NOBMI-NEXT: addq $8, %rsp @@ -2129,7 +2129,7 @@ ; X64-BMI1NOTBM-NEXT: # kill: def $cl killed $cl killed $ecx ; X64-BMI1NOTBM-NEXT: shrl 
%cl, %ebp ; X64-BMI1NOTBM-NEXT: movl %ebp, %edi -; X64-BMI1NOTBM-NEXT: callq use32 +; X64-BMI1NOTBM-NEXT: callq use32@PLT ; X64-BMI1NOTBM-NEXT: andl %ebx, %ebp ; X64-BMI1NOTBM-NEXT: movl %ebp, %eax ; X64-BMI1NOTBM-NEXT: addq $8, %rsp @@ -2148,7 +2148,7 @@ ; X64-BMI1BMI2-NEXT: negb %al ; X64-BMI1BMI2-NEXT: movl $-1, %ecx ; X64-BMI1BMI2-NEXT: shrxl %eax, %ecx, %edi -; X64-BMI1BMI2-NEXT: callq use32 +; X64-BMI1BMI2-NEXT: callq use32@PLT ; X64-BMI1BMI2-NEXT: bzhil %ebx, %ebp, %eax ; X64-BMI1BMI2-NEXT: addq $8, %rsp ; X64-BMI1BMI2-NEXT: popq %rbx @@ -2172,7 +2172,7 @@ ; X86-NOBMI-NEXT: # kill: def $cl killed $cl killed $ecx ; X86-NOBMI-NEXT: shrl %cl, %esi ; X86-NOBMI-NEXT: movl %esi, (%esp) -; X86-NOBMI-NEXT: calll use32 +; X86-NOBMI-NEXT: calll use32@PLT ; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %esi ; X86-NOBMI-NEXT: movl %esi, %eax ; X86-NOBMI-NEXT: addl $8, %esp @@ -2189,7 +2189,7 @@ ; X86-BMI1NOTBM-NEXT: # kill: def $cl killed $cl killed $ecx ; X86-BMI1NOTBM-NEXT: shrl %cl, %esi ; X86-BMI1NOTBM-NEXT: movl %esi, (%esp) -; X86-BMI1NOTBM-NEXT: calll use32 +; X86-BMI1NOTBM-NEXT: calll use32@PLT ; X86-BMI1NOTBM-NEXT: andl {{[0-9]+}}(%esp), %esi ; X86-BMI1NOTBM-NEXT: movl %esi, %eax ; X86-BMI1NOTBM-NEXT: addl $8, %esp @@ -2206,7 +2206,7 @@ ; X86-BMI1BMI2-NEXT: movl $-1, %ecx ; X86-BMI1BMI2-NEXT: shrxl %eax, %ecx, %eax ; X86-BMI1BMI2-NEXT: movl %eax, (%esp) -; X86-BMI1BMI2-NEXT: calll use32 +; X86-BMI1BMI2-NEXT: calll use32@PLT ; X86-BMI1BMI2-NEXT: bzhil %ebx, {{[0-9]+}}(%esp), %eax ; X86-BMI1BMI2-NEXT: addl $8, %esp ; X86-BMI1BMI2-NEXT: popl %ebx @@ -2224,7 +2224,7 @@ ; X64-NOBMI-NEXT: # kill: def $cl killed $cl killed $ecx ; X64-NOBMI-NEXT: shrl %cl, %ebp ; X64-NOBMI-NEXT: movl %ebp, %edi -; X64-NOBMI-NEXT: callq use32 +; X64-NOBMI-NEXT: callq use32@PLT ; X64-NOBMI-NEXT: andl %ebx, %ebp ; X64-NOBMI-NEXT: movl %ebp, %eax ; X64-NOBMI-NEXT: addq $8, %rsp @@ -2244,7 +2244,7 @@ ; X64-BMI1NOTBM-NEXT: # kill: def $cl killed $cl killed $ecx ; X64-BMI1NOTBM-NEXT: shrl %cl, %ebp ; X64-BMI1NOTBM-NEXT: movl %ebp, %edi -; X64-BMI1NOTBM-NEXT: callq use32 +; X64-BMI1NOTBM-NEXT: callq use32@PLT ; X64-BMI1NOTBM-NEXT: andl %ebx, %ebp ; X64-BMI1NOTBM-NEXT: movl %ebp, %eax ; X64-BMI1NOTBM-NEXT: addq $8, %rsp @@ -2263,7 +2263,7 @@ ; X64-BMI1BMI2-NEXT: negb %al ; X64-BMI1BMI2-NEXT: movl $-1, %ecx ; X64-BMI1BMI2-NEXT: shrxl %eax, %ecx, %edi -; X64-BMI1BMI2-NEXT: callq use32 +; X64-BMI1BMI2-NEXT: callq use32@PLT ; X64-BMI1BMI2-NEXT: bzhil %ebx, %ebp, %eax ; X64-BMI1BMI2-NEXT: addq $8, %rsp ; X64-BMI1BMI2-NEXT: popq %rbx @@ -2291,7 +2291,7 @@ ; X86-NOBMI-NEXT: movl (%eax), %esi ; X86-NOBMI-NEXT: andl %edx, %esi ; X86-NOBMI-NEXT: movl %edx, (%esp) -; X86-NOBMI-NEXT: calll use32 +; X86-NOBMI-NEXT: calll use32@PLT ; X86-NOBMI-NEXT: movl %esi, %eax ; X86-NOBMI-NEXT: addl $8, %esp ; X86-NOBMI-NEXT: popl %esi @@ -2310,7 +2310,7 @@ ; X86-BMI1NOTBM-NEXT: movl (%eax), %esi ; X86-BMI1NOTBM-NEXT: andl %edx, %esi ; X86-BMI1NOTBM-NEXT: movl %edx, (%esp) -; X86-BMI1NOTBM-NEXT: calll use32 +; X86-BMI1NOTBM-NEXT: calll use32@PLT ; X86-BMI1NOTBM-NEXT: movl %esi, %eax ; X86-BMI1NOTBM-NEXT: addl $8, %esp ; X86-BMI1NOTBM-NEXT: popl %esi @@ -2328,7 +2328,7 @@ ; X86-BMI1BMI2-NEXT: movl $-1, %eax ; X86-BMI1BMI2-NEXT: shrxl %ecx, %eax, %eax ; X86-BMI1BMI2-NEXT: movl %eax, (%esp) -; X86-BMI1BMI2-NEXT: calll use32 +; X86-BMI1BMI2-NEXT: calll use32@PLT ; X86-BMI1BMI2-NEXT: movl %esi, %eax ; X86-BMI1BMI2-NEXT: addl $8, %esp ; X86-BMI1BMI2-NEXT: popl %esi @@ -2345,7 +2345,7 @@ ; X64-NOBMI-NEXT: movl (%rdi), %ebx ; X64-NOBMI-NEXT: andl %eax, %ebx ; 
X64-NOBMI-NEXT: movl %eax, %edi -; X64-NOBMI-NEXT: callq use32 +; X64-NOBMI-NEXT: callq use32@PLT ; X64-NOBMI-NEXT: movl %ebx, %eax ; X64-NOBMI-NEXT: popq %rbx ; X64-NOBMI-NEXT: retq @@ -2361,7 +2361,7 @@ ; X64-BMI1NOTBM-NEXT: movl (%rdi), %ebx ; X64-BMI1NOTBM-NEXT: andl %eax, %ebx ; X64-BMI1NOTBM-NEXT: movl %eax, %edi -; X64-BMI1NOTBM-NEXT: callq use32 +; X64-BMI1NOTBM-NEXT: callq use32@PLT ; X64-BMI1NOTBM-NEXT: movl %ebx, %eax ; X64-BMI1NOTBM-NEXT: popq %rbx ; X64-BMI1NOTBM-NEXT: retq @@ -2374,7 +2374,7 @@ ; X64-BMI1BMI2-NEXT: negb %sil ; X64-BMI1BMI2-NEXT: movl $-1, %eax ; X64-BMI1BMI2-NEXT: shrxl %esi, %eax, %edi -; X64-BMI1BMI2-NEXT: callq use32 +; X64-BMI1BMI2-NEXT: callq use32@PLT ; X64-BMI1BMI2-NEXT: movl %ebx, %eax ; X64-BMI1BMI2-NEXT: popq %rbx ; X64-BMI1BMI2-NEXT: retq @@ -2400,7 +2400,7 @@ ; X86-NOBMI-NEXT: movl (%eax), %esi ; X86-NOBMI-NEXT: andl %edx, %esi ; X86-NOBMI-NEXT: movl %edx, (%esp) -; X86-NOBMI-NEXT: calll use32 +; X86-NOBMI-NEXT: calll use32@PLT ; X86-NOBMI-NEXT: movl %esi, %eax ; X86-NOBMI-NEXT: addl $8, %esp ; X86-NOBMI-NEXT: popl %esi @@ -2419,7 +2419,7 @@ ; X86-BMI1NOTBM-NEXT: movl (%eax), %esi ; X86-BMI1NOTBM-NEXT: andl %edx, %esi ; X86-BMI1NOTBM-NEXT: movl %edx, (%esp) -; X86-BMI1NOTBM-NEXT: calll use32 +; X86-BMI1NOTBM-NEXT: calll use32@PLT ; X86-BMI1NOTBM-NEXT: movl %esi, %eax ; X86-BMI1NOTBM-NEXT: addl $8, %esp ; X86-BMI1NOTBM-NEXT: popl %esi @@ -2437,7 +2437,7 @@ ; X86-BMI1BMI2-NEXT: movl $-1, %eax ; X86-BMI1BMI2-NEXT: shrxl %ecx, %eax, %eax ; X86-BMI1BMI2-NEXT: movl %eax, (%esp) -; X86-BMI1BMI2-NEXT: calll use32 +; X86-BMI1BMI2-NEXT: calll use32@PLT ; X86-BMI1BMI2-NEXT: movl %esi, %eax ; X86-BMI1BMI2-NEXT: addl $8, %esp ; X86-BMI1BMI2-NEXT: popl %esi @@ -2454,7 +2454,7 @@ ; X64-NOBMI-NEXT: movl (%rdi), %ebx ; X64-NOBMI-NEXT: andl %eax, %ebx ; X64-NOBMI-NEXT: movl %eax, %edi -; X64-NOBMI-NEXT: callq use32 +; X64-NOBMI-NEXT: callq use32@PLT ; X64-NOBMI-NEXT: movl %ebx, %eax ; X64-NOBMI-NEXT: popq %rbx ; X64-NOBMI-NEXT: retq @@ -2470,7 +2470,7 @@ ; X64-BMI1NOTBM-NEXT: movl (%rdi), %ebx ; X64-BMI1NOTBM-NEXT: andl %eax, %ebx ; X64-BMI1NOTBM-NEXT: movl %eax, %edi -; X64-BMI1NOTBM-NEXT: callq use32 +; X64-BMI1NOTBM-NEXT: callq use32@PLT ; X64-BMI1NOTBM-NEXT: movl %ebx, %eax ; X64-BMI1NOTBM-NEXT: popq %rbx ; X64-BMI1NOTBM-NEXT: retq @@ -2483,7 +2483,7 @@ ; X64-BMI1BMI2-NEXT: negb %sil ; X64-BMI1BMI2-NEXT: movl $-1, %eax ; X64-BMI1BMI2-NEXT: shrxl %esi, %eax, %edi -; X64-BMI1BMI2-NEXT: callq use32 +; X64-BMI1BMI2-NEXT: callq use32@PLT ; X64-BMI1BMI2-NEXT: movl %ebx, %eax ; X64-BMI1BMI2-NEXT: popq %rbx ; X64-BMI1BMI2-NEXT: retq @@ -2507,7 +2507,7 @@ ; X86-NOBMI-NEXT: # kill: def $cl killed $cl killed $ecx ; X86-NOBMI-NEXT: shrl %cl, %esi ; X86-NOBMI-NEXT: movl %esi, (%esp) -; X86-NOBMI-NEXT: calll use32 +; X86-NOBMI-NEXT: calll use32@PLT ; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %esi ; X86-NOBMI-NEXT: movl %esi, %eax ; X86-NOBMI-NEXT: addl $8, %esp @@ -2524,7 +2524,7 @@ ; X86-BMI1NOTBM-NEXT: # kill: def $cl killed $cl killed $ecx ; X86-BMI1NOTBM-NEXT: shrl %cl, %esi ; X86-BMI1NOTBM-NEXT: movl %esi, (%esp) -; X86-BMI1NOTBM-NEXT: calll use32 +; X86-BMI1NOTBM-NEXT: calll use32@PLT ; X86-BMI1NOTBM-NEXT: andl {{[0-9]+}}(%esp), %esi ; X86-BMI1NOTBM-NEXT: movl %esi, %eax ; X86-BMI1NOTBM-NEXT: addl $8, %esp @@ -2541,7 +2541,7 @@ ; X86-BMI1BMI2-NEXT: movl $-1, %ecx ; X86-BMI1BMI2-NEXT: shrxl %eax, %ecx, %eax ; X86-BMI1BMI2-NEXT: movl %eax, (%esp) -; X86-BMI1BMI2-NEXT: calll use32 +; X86-BMI1BMI2-NEXT: calll use32@PLT ; X86-BMI1BMI2-NEXT: bzhil %ebx, {{[0-9]+}}(%esp), %eax ; 
X86-BMI1BMI2-NEXT: addl $8, %esp ; X86-BMI1BMI2-NEXT: popl %ebx @@ -2559,7 +2559,7 @@ ; X64-NOBMI-NEXT: # kill: def $cl killed $cl killed $ecx ; X64-NOBMI-NEXT: shrl %cl, %ebp ; X64-NOBMI-NEXT: movl %ebp, %edi -; X64-NOBMI-NEXT: callq use32 +; X64-NOBMI-NEXT: callq use32@PLT ; X64-NOBMI-NEXT: andl %ebx, %ebp ; X64-NOBMI-NEXT: movl %ebp, %eax ; X64-NOBMI-NEXT: addq $8, %rsp @@ -2579,7 +2579,7 @@ ; X64-BMI1NOTBM-NEXT: # kill: def $cl killed $cl killed $ecx ; X64-BMI1NOTBM-NEXT: shrl %cl, %ebp ; X64-BMI1NOTBM-NEXT: movl %ebp, %edi -; X64-BMI1NOTBM-NEXT: callq use32 +; X64-BMI1NOTBM-NEXT: callq use32@PLT ; X64-BMI1NOTBM-NEXT: andl %ebx, %ebp ; X64-BMI1NOTBM-NEXT: movl %ebp, %eax ; X64-BMI1NOTBM-NEXT: addq $8, %rsp @@ -2598,7 +2598,7 @@ ; X64-BMI1BMI2-NEXT: negb %al ; X64-BMI1BMI2-NEXT: movl $-1, %ecx ; X64-BMI1BMI2-NEXT: shrxl %eax, %ecx, %edi -; X64-BMI1BMI2-NEXT: callq use32 +; X64-BMI1BMI2-NEXT: callq use32@PLT ; X64-BMI1BMI2-NEXT: bzhil %ebx, %ebp, %eax ; X64-BMI1BMI2-NEXT: addq $8, %rsp ; X64-BMI1BMI2-NEXT: popq %rbx @@ -2635,7 +2635,7 @@ ; X86-NOBMI-NEXT: subl $8, %esp ; X86-NOBMI-NEXT: pushl %edi ; X86-NOBMI-NEXT: pushl %esi -; X86-NOBMI-NEXT: calll use64 +; X86-NOBMI-NEXT: calll use64@PLT ; X86-NOBMI-NEXT: addl $16, %esp ; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %esi ; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %edi @@ -2665,7 +2665,7 @@ ; X86-BMI1NOTBM-NEXT: subl $8, %esp ; X86-BMI1NOTBM-NEXT: pushl %edi ; X86-BMI1NOTBM-NEXT: pushl %esi -; X86-BMI1NOTBM-NEXT: calll use64 +; X86-BMI1NOTBM-NEXT: calll use64@PLT ; X86-BMI1NOTBM-NEXT: addl $16, %esp ; X86-BMI1NOTBM-NEXT: andl {{[0-9]+}}(%esp), %esi ; X86-BMI1NOTBM-NEXT: andl {{[0-9]+}}(%esp), %edi @@ -2694,7 +2694,7 @@ ; X86-BMI1BMI2-NEXT: subl $8, %esp ; X86-BMI1BMI2-NEXT: pushl %esi ; X86-BMI1BMI2-NEXT: pushl %edi -; X86-BMI1BMI2-NEXT: calll use64 +; X86-BMI1BMI2-NEXT: calll use64@PLT ; X86-BMI1BMI2-NEXT: addl $16, %esp ; X86-BMI1BMI2-NEXT: andl {{[0-9]+}}(%esp), %edi ; X86-BMI1BMI2-NEXT: andl {{[0-9]+}}(%esp), %esi @@ -2717,7 +2717,7 @@ ; X64-NOBMI-NEXT: # kill: def $cl killed $cl killed $rcx ; X64-NOBMI-NEXT: shrq %cl, %rbx ; X64-NOBMI-NEXT: movq %rbx, %rdi -; X64-NOBMI-NEXT: callq use64 +; X64-NOBMI-NEXT: callq use64@PLT ; X64-NOBMI-NEXT: andq %r14, %rbx ; X64-NOBMI-NEXT: movq %rbx, %rax ; X64-NOBMI-NEXT: addq $8, %rsp @@ -2737,7 +2737,7 @@ ; X64-BMI1NOTBM-NEXT: # kill: def $cl killed $cl killed $rcx ; X64-BMI1NOTBM-NEXT: shrq %cl, %rbx ; X64-BMI1NOTBM-NEXT: movq %rbx, %rdi -; X64-BMI1NOTBM-NEXT: callq use64 +; X64-BMI1NOTBM-NEXT: callq use64@PLT ; X64-BMI1NOTBM-NEXT: andq %r14, %rbx ; X64-BMI1NOTBM-NEXT: movq %rbx, %rax ; X64-BMI1NOTBM-NEXT: addq $8, %rsp @@ -2756,7 +2756,7 @@ ; X64-BMI1BMI2-NEXT: negb %al ; X64-BMI1BMI2-NEXT: movq $-1, %rcx ; X64-BMI1BMI2-NEXT: shrxq %rax, %rcx, %rdi -; X64-BMI1BMI2-NEXT: callq use64 +; X64-BMI1BMI2-NEXT: callq use64@PLT ; X64-BMI1BMI2-NEXT: bzhiq %rbx, %r14, %rax ; X64-BMI1BMI2-NEXT: addq $8, %rsp ; X64-BMI1BMI2-NEXT: popq %rbx @@ -2789,7 +2789,7 @@ ; X86-NOBMI-NEXT: subl $8, %esp ; X86-NOBMI-NEXT: pushl %edi ; X86-NOBMI-NEXT: pushl %esi -; X86-NOBMI-NEXT: calll use64 +; X86-NOBMI-NEXT: calll use64@PLT ; X86-NOBMI-NEXT: addl $16, %esp ; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %esi ; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %edi @@ -2819,7 +2819,7 @@ ; X86-BMI1NOTBM-NEXT: subl $8, %esp ; X86-BMI1NOTBM-NEXT: pushl %edi ; X86-BMI1NOTBM-NEXT: pushl %esi -; X86-BMI1NOTBM-NEXT: calll use64 +; X86-BMI1NOTBM-NEXT: calll use64@PLT ; X86-BMI1NOTBM-NEXT: addl $16, %esp ; X86-BMI1NOTBM-NEXT: andl {{[0-9]+}}(%esp), 
%esi ; X86-BMI1NOTBM-NEXT: andl {{[0-9]+}}(%esp), %edi @@ -2848,7 +2848,7 @@ ; X86-BMI1BMI2-NEXT: subl $8, %esp ; X86-BMI1BMI2-NEXT: pushl %esi ; X86-BMI1BMI2-NEXT: pushl %edi -; X86-BMI1BMI2-NEXT: calll use64 +; X86-BMI1BMI2-NEXT: calll use64@PLT ; X86-BMI1BMI2-NEXT: addl $16, %esp ; X86-BMI1BMI2-NEXT: andl {{[0-9]+}}(%esp), %edi ; X86-BMI1BMI2-NEXT: andl {{[0-9]+}}(%esp), %esi @@ -2871,7 +2871,7 @@ ; X64-NOBMI-NEXT: # kill: def $cl killed $cl killed $ecx ; X64-NOBMI-NEXT: shrq %cl, %rbx ; X64-NOBMI-NEXT: movq %rbx, %rdi -; X64-NOBMI-NEXT: callq use64 +; X64-NOBMI-NEXT: callq use64@PLT ; X64-NOBMI-NEXT: andq %r14, %rbx ; X64-NOBMI-NEXT: movq %rbx, %rax ; X64-NOBMI-NEXT: addq $8, %rsp @@ -2891,7 +2891,7 @@ ; X64-BMI1NOTBM-NEXT: # kill: def $cl killed $cl killed $ecx ; X64-BMI1NOTBM-NEXT: shrq %cl, %rbx ; X64-BMI1NOTBM-NEXT: movq %rbx, %rdi -; X64-BMI1NOTBM-NEXT: callq use64 +; X64-BMI1NOTBM-NEXT: callq use64@PLT ; X64-BMI1NOTBM-NEXT: andq %r14, %rbx ; X64-BMI1NOTBM-NEXT: movq %rbx, %rax ; X64-BMI1NOTBM-NEXT: addq $8, %rsp @@ -2910,7 +2910,7 @@ ; X64-BMI1BMI2-NEXT: negb %al ; X64-BMI1BMI2-NEXT: movq $-1, %rcx ; X64-BMI1BMI2-NEXT: shrxq %rax, %rcx, %rdi -; X64-BMI1BMI2-NEXT: callq use64 +; X64-BMI1BMI2-NEXT: callq use64@PLT ; X64-BMI1BMI2-NEXT: bzhiq %rbx, %r14, %rax ; X64-BMI1BMI2-NEXT: addq $8, %rsp ; X64-BMI1BMI2-NEXT: popq %rbx @@ -2949,7 +2949,7 @@ ; X86-NOBMI-NEXT: subl $8, %esp ; X86-NOBMI-NEXT: pushl %ebx ; X86-NOBMI-NEXT: pushl %eax -; X86-NOBMI-NEXT: calll use64 +; X86-NOBMI-NEXT: calll use64@PLT ; X86-NOBMI-NEXT: addl $16, %esp ; X86-NOBMI-NEXT: movl %edi, %eax ; X86-NOBMI-NEXT: movl %esi, %edx @@ -2982,7 +2982,7 @@ ; X86-BMI1NOTBM-NEXT: subl $8, %esp ; X86-BMI1NOTBM-NEXT: pushl %ebx ; X86-BMI1NOTBM-NEXT: pushl %eax -; X86-BMI1NOTBM-NEXT: calll use64 +; X86-BMI1NOTBM-NEXT: calll use64@PLT ; X86-BMI1NOTBM-NEXT: addl $16, %esp ; X86-BMI1NOTBM-NEXT: movl %edi, %eax ; X86-BMI1NOTBM-NEXT: movl %esi, %edx @@ -3014,7 +3014,7 @@ ; X86-BMI1BMI2-NEXT: subl $8, %esp ; X86-BMI1BMI2-NEXT: pushl %edx ; X86-BMI1BMI2-NEXT: pushl %ecx -; X86-BMI1BMI2-NEXT: calll use64 +; X86-BMI1BMI2-NEXT: calll use64@PLT ; X86-BMI1BMI2-NEXT: addl $16, %esp ; X86-BMI1BMI2-NEXT: movl %edi, %eax ; X86-BMI1BMI2-NEXT: movl %esi, %edx @@ -3034,7 +3034,7 @@ ; X64-NOBMI-NEXT: movq (%rdi), %rbx ; X64-NOBMI-NEXT: andq %rax, %rbx ; X64-NOBMI-NEXT: movq %rax, %rdi -; X64-NOBMI-NEXT: callq use64 +; X64-NOBMI-NEXT: callq use64@PLT ; X64-NOBMI-NEXT: movq %rbx, %rax ; X64-NOBMI-NEXT: popq %rbx ; X64-NOBMI-NEXT: retq @@ -3050,7 +3050,7 @@ ; X64-BMI1NOTBM-NEXT: movq (%rdi), %rbx ; X64-BMI1NOTBM-NEXT: andq %rax, %rbx ; X64-BMI1NOTBM-NEXT: movq %rax, %rdi -; X64-BMI1NOTBM-NEXT: callq use64 +; X64-BMI1NOTBM-NEXT: callq use64@PLT ; X64-BMI1NOTBM-NEXT: movq %rbx, %rax ; X64-BMI1NOTBM-NEXT: popq %rbx ; X64-BMI1NOTBM-NEXT: retq @@ -3063,7 +3063,7 @@ ; X64-BMI1BMI2-NEXT: negb %sil ; X64-BMI1BMI2-NEXT: movq $-1, %rax ; X64-BMI1BMI2-NEXT: shrxq %rsi, %rax, %rdi -; X64-BMI1BMI2-NEXT: callq use64 +; X64-BMI1BMI2-NEXT: callq use64@PLT ; X64-BMI1BMI2-NEXT: movq %rbx, %rax ; X64-BMI1BMI2-NEXT: popq %rbx ; X64-BMI1BMI2-NEXT: retq @@ -3100,7 +3100,7 @@ ; X86-NOBMI-NEXT: subl $8, %esp ; X86-NOBMI-NEXT: pushl %ebx ; X86-NOBMI-NEXT: pushl %eax -; X86-NOBMI-NEXT: calll use64 +; X86-NOBMI-NEXT: calll use64@PLT ; X86-NOBMI-NEXT: addl $16, %esp ; X86-NOBMI-NEXT: movl %edi, %eax ; X86-NOBMI-NEXT: movl %esi, %edx @@ -3133,7 +3133,7 @@ ; X86-BMI1NOTBM-NEXT: subl $8, %esp ; X86-BMI1NOTBM-NEXT: pushl %ebx ; X86-BMI1NOTBM-NEXT: pushl %eax -; 
X86-BMI1NOTBM-NEXT: calll use64 +; X86-BMI1NOTBM-NEXT: calll use64@PLT ; X86-BMI1NOTBM-NEXT: addl $16, %esp ; X86-BMI1NOTBM-NEXT: movl %edi, %eax ; X86-BMI1NOTBM-NEXT: movl %esi, %edx @@ -3165,7 +3165,7 @@ ; X86-BMI1BMI2-NEXT: subl $8, %esp ; X86-BMI1BMI2-NEXT: pushl %edx ; X86-BMI1BMI2-NEXT: pushl %ecx -; X86-BMI1BMI2-NEXT: calll use64 +; X86-BMI1BMI2-NEXT: calll use64@PLT ; X86-BMI1BMI2-NEXT: addl $16, %esp ; X86-BMI1BMI2-NEXT: movl %edi, %eax ; X86-BMI1BMI2-NEXT: movl %esi, %edx @@ -3185,7 +3185,7 @@ ; X64-NOBMI-NEXT: movq (%rdi), %rbx ; X64-NOBMI-NEXT: andq %rax, %rbx ; X64-NOBMI-NEXT: movq %rax, %rdi -; X64-NOBMI-NEXT: callq use64 +; X64-NOBMI-NEXT: callq use64@PLT ; X64-NOBMI-NEXT: movq %rbx, %rax ; X64-NOBMI-NEXT: popq %rbx ; X64-NOBMI-NEXT: retq @@ -3201,7 +3201,7 @@ ; X64-BMI1NOTBM-NEXT: movq (%rdi), %rbx ; X64-BMI1NOTBM-NEXT: andq %rax, %rbx ; X64-BMI1NOTBM-NEXT: movq %rax, %rdi -; X64-BMI1NOTBM-NEXT: callq use64 +; X64-BMI1NOTBM-NEXT: callq use64@PLT ; X64-BMI1NOTBM-NEXT: movq %rbx, %rax ; X64-BMI1NOTBM-NEXT: popq %rbx ; X64-BMI1NOTBM-NEXT: retq @@ -3215,7 +3215,7 @@ ; X64-BMI1BMI2-NEXT: negb %sil ; X64-BMI1BMI2-NEXT: movq $-1, %rax ; X64-BMI1BMI2-NEXT: shrxq %rsi, %rax, %rdi -; X64-BMI1BMI2-NEXT: callq use64 +; X64-BMI1BMI2-NEXT: callq use64@PLT ; X64-BMI1BMI2-NEXT: movq %rbx, %rax ; X64-BMI1BMI2-NEXT: popq %rbx ; X64-BMI1BMI2-NEXT: retq @@ -3248,7 +3248,7 @@ ; X86-NOBMI-NEXT: subl $8, %esp ; X86-NOBMI-NEXT: pushl %edi ; X86-NOBMI-NEXT: pushl %esi -; X86-NOBMI-NEXT: calll use64 +; X86-NOBMI-NEXT: calll use64@PLT ; X86-NOBMI-NEXT: addl $16, %esp ; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %esi ; X86-NOBMI-NEXT: andl {{[0-9]+}}(%esp), %edi @@ -3278,7 +3278,7 @@ ; X86-BMI1NOTBM-NEXT: subl $8, %esp ; X86-BMI1NOTBM-NEXT: pushl %edi ; X86-BMI1NOTBM-NEXT: pushl %esi -; X86-BMI1NOTBM-NEXT: calll use64 +; X86-BMI1NOTBM-NEXT: calll use64@PLT ; X86-BMI1NOTBM-NEXT: addl $16, %esp ; X86-BMI1NOTBM-NEXT: andl {{[0-9]+}}(%esp), %esi ; X86-BMI1NOTBM-NEXT: andl {{[0-9]+}}(%esp), %edi @@ -3307,7 +3307,7 @@ ; X86-BMI1BMI2-NEXT: subl $8, %esp ; X86-BMI1BMI2-NEXT: pushl %esi ; X86-BMI1BMI2-NEXT: pushl %edi -; X86-BMI1BMI2-NEXT: calll use64 +; X86-BMI1BMI2-NEXT: calll use64@PLT ; X86-BMI1BMI2-NEXT: addl $16, %esp ; X86-BMI1BMI2-NEXT: andl {{[0-9]+}}(%esp), %edi ; X86-BMI1BMI2-NEXT: andl {{[0-9]+}}(%esp), %esi @@ -3330,7 +3330,7 @@ ; X64-NOBMI-NEXT: # kill: def $cl killed $cl killed $rcx ; X64-NOBMI-NEXT: shrq %cl, %rbx ; X64-NOBMI-NEXT: movq %rbx, %rdi -; X64-NOBMI-NEXT: callq use64 +; X64-NOBMI-NEXT: callq use64@PLT ; X64-NOBMI-NEXT: andq %r14, %rbx ; X64-NOBMI-NEXT: movq %rbx, %rax ; X64-NOBMI-NEXT: addq $8, %rsp @@ -3350,7 +3350,7 @@ ; X64-BMI1NOTBM-NEXT: # kill: def $cl killed $cl killed $rcx ; X64-BMI1NOTBM-NEXT: shrq %cl, %rbx ; X64-BMI1NOTBM-NEXT: movq %rbx, %rdi -; X64-BMI1NOTBM-NEXT: callq use64 +; X64-BMI1NOTBM-NEXT: callq use64@PLT ; X64-BMI1NOTBM-NEXT: andq %r14, %rbx ; X64-BMI1NOTBM-NEXT: movq %rbx, %rax ; X64-BMI1NOTBM-NEXT: addq $8, %rsp @@ -3369,7 +3369,7 @@ ; X64-BMI1BMI2-NEXT: negb %al ; X64-BMI1BMI2-NEXT: movq $-1, %rcx ; X64-BMI1BMI2-NEXT: shrxq %rax, %rcx, %rdi -; X64-BMI1BMI2-NEXT: callq use64 +; X64-BMI1BMI2-NEXT: callq use64@PLT ; X64-BMI1BMI2-NEXT: bzhiq %rbx, %r14, %rax ; X64-BMI1BMI2-NEXT: addq $8, %rsp ; X64-BMI1BMI2-NEXT: popq %rbx diff --git a/llvm/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll b/llvm/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll --- a/llvm/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll +++ b/llvm/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll @@ 
-1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s --check-prefix=CHECK --check-prefixes=ISEL,SSE,SSE-ISEL
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefix=CHECK --check-prefixes=FASTISEL,SSE,SSE-FASTISEL
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=corei7-avx | FileCheck %s --check-prefix=CHECK --check-prefixes=ISEL,AVX,AVX-ISEL
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort=1 -mcpu=corei7-avx | FileCheck %s --check-prefix=CHECK --check-prefixes=FASTISEL,AVX,AVX-FASTISEL
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=skylake-avx512 -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefixes=ISEL,AVX512,AVX512-ISEL
-; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort=1 -mcpu=skylake-avx512 -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefixes=FASTISEL,AVX512,AVX512-FASTISEL
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s --check-prefixes=ISEL,SSE
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefixes=FASTISEL,SSE
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=corei7-avx | FileCheck %s --check-prefixes=ISEL,AVX
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort=1 -mcpu=corei7-avx | FileCheck %s --check-prefixes=FASTISEL,AVX
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=skylake-avx512 -verify-machineinstrs | FileCheck %s --check-prefixes=ISEL,AVX512,AVX512-ISEL
+; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort=1 -mcpu=skylake-avx512 -verify-machineinstrs | FileCheck %s --check-prefixes=FASTISEL,AVX512,AVX512-FASTISEL
 define float @select_fcmp_one_f32(float %a, float %b, float %c, float %d) {
diff --git a/llvm/test/CodeGen/X86/haddsub-undef.ll b/llvm/test/CodeGen/X86/haddsub-undef.ll
--- a/llvm/test/CodeGen/X86/haddsub-undef.ll
+++ b/llvm/test/CodeGen/X86/haddsub-undef.ll
@@ -4,7 +4,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX1-SLOW
 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX1-FAST
 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX512,AVX512-SLOW
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX512,AVX512-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX512
 ; Verify that we correctly fold horizontal binop even in the presence of UNDEFs.
diff --git a/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll b/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
--- a/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
@@ -1,12 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2 < %s | FileCheck %s --check-prefixes=CHECK,X86,V0123,X86-V0123,SSE2,X86-SSE2,BMI1,X86-BMI1,V0,X86-V0
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi < %s | FileCheck %s --check-prefixes=CHECK,X86,V0123,X86-V0123,SSE2,X86-SSE2,BMI1,X86-BMI1,V1,X86-V1
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,V0123,X86-V0123,SSE2,X86-SSE2,BMI2,X86-BMI2,V2,X86-V2
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2,+avx2 < %s | FileCheck %s --check-prefixes=CHECK,X86,V0123,X86-V0123,BMI2,X86-BMI2,AVX2,X86-AVX2
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2 < %s | FileCheck %s --check-prefixes=CHECK,X64,V0123,X64-V0123,SSE2,X64-SSE2,BMI1,X64-BMI1,V0,X64-V0
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi < %s | FileCheck %s --check-prefixes=CHECK,X64,V0123,X64-V0123,SSE2,X64-SSE2,BMI1,X64-BMI1,V1,X64-V1
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,V0123,X64-V0123,SSE2,X64-SSE2,BMI2,X64-BMI2,V2,X64-V2
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2,+avx2 < %s | FileCheck %s --check-prefixes=CHECK,X64,V0123,X64-V0123,BMI2,X64-BMI2,AVX2,X64-AVX2
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2 < %s | FileCheck %s --check-prefixes=CHECK,X86,SSE2,X86-SSE2,X86-BMI1
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi < %s | FileCheck %s --check-prefixes=CHECK,X86,SSE2,X86-SSE2,X86-BMI1
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,SSE2,X86-SSE2,X86-BMI2
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2,+avx2 < %s | FileCheck %s --check-prefixes=CHECK,X86,X86-BMI2,AVX2
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2 < %s | FileCheck %s --check-prefixes=CHECK,X64,SSE2,X64-SSE2,X64-BMI1
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi < %s | FileCheck %s --check-prefixes=CHECK,X64,SSE2,X64-SSE2,X64-BMI1
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,SSE2,X64-SSE2,X64-BMI2
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2,+avx2 < %s | FileCheck %s --check-prefixes=CHECK,X64,X64-BMI2,AVX2
 ; We are looking for the following pattern here:
 ; (X & (C l>> Y)) ==/!= 0
diff --git a/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll b/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
--- a/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
@@ -1,12 +1,12 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2 < %s | FileCheck %s --check-prefixes=CHECK,X86,V0123,X86-V0123,SSE2,X86-SSE2,BMI1,X86-BMI1,V0,X86-V0
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi < %s | FileCheck %s --check-prefixes=CHECK,X86,V0123,X86-V0123,SSE2,X86-SSE2,BMI1,X86-BMI1,V1,X86-V1
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X86,V0123,X86-V0123,SSE2,X86-SSE2,BMI2,X86-BMI2,V2,X86-V2
-; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2,+avx2 < %s | FileCheck %s --check-prefixes=CHECK,X86,V0123,X86-V0123,BMI2,X86-BMI2,AVX2,X86-AVX2
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2 < %s | FileCheck %s --check-prefixes=CHECK,X64,V0123,X64-V0123,SSE2,X64-SSE2,BMI1,X64-BMI1,V0,X64-V0
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi < %s | FileCheck %s --check-prefixes=CHECK,X64,V0123,X64-V0123,SSE2,X64-SSE2,BMI1,X64-BMI1,V1,X64-V1
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2 < %s | FileCheck %s --check-prefixes=CHECK,X64,V0123,X64-V0123,SSE2,X64-SSE2,BMI2,X64-BMI2,V2,X64-V2
-; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2,+avx2 < %s | FileCheck %s --check-prefixes=CHECK,X64,V0123,X64-V0123,BMI2,X64-BMI2,AVX2,X64-AVX2
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2 < %s | FileCheck %s --check-prefixes=X86,X86-SSE2,X86-BMI1
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi < %s | FileCheck %s --check-prefixes=X86,X86-SSE2,X86-BMI1
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2 < %s | FileCheck %s --check-prefixes=X86,X86-SSE2,X86-BMI2
+; RUN: llc -mtriple=i686-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2,+avx2 < %s | FileCheck %s --check-prefixes=X86,X86-BMI2,AVX2
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2 < %s | FileCheck %s --check-prefixes=X64,X64-SSE2,X64-BMI1
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi < %s | FileCheck %s --check-prefixes=X64,X64-SSE2,X64-BMI1
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2 < %s | FileCheck %s --check-prefixes=X64,X64-SSE2,X64-BMI2
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+sse,sse2,+bmi,+bmi2,+avx2 < %s | FileCheck %s --check-prefixes=X64,X64-BMI2,AVX2
 ; We are looking for the following pattern here:
 ; (X & (C << Y)) ==/!= 0
diff --git a/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll b/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll
--- a/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll
+++ b/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll
@@ -1,26 +1,26 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-pc-windows-msvc -mattr=+avx512f,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X86,X86-AVX512,X86-AVX512DQVL,X86-AVX512-WIN,X86-AVX512DQVL-WIN
-; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mattr=+avx512f,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X86,X86-AVX512,X86-AVX512DQVL,X86-AVX512-LIN,X86-AVX512DQVL-LIN
-; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc -mattr=+avx512f,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X64,X64-AVX512,X64-AVX512DQVL,X64-AVX512-WIN,X64-AVX512DQVL-WIN
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X86,X64-AVX512,X64-AVX512DQVL,X64-AVX512-LIN,X64-AVX512DQVL-LIN
-; RUN: llc < %s -mtriple=i386-pc-windows-msvc -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefixes=CHECK,X86,X86-AVX512,X86-AVX512DQ,X86-AVX512-WIN,X86-AVX512DQ-WIN
-; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefixes=CHECK,X86,X86-AVX512,X86-AVX512DQ,X86-AVX512-LIN,X86-AVX512DQ-LIN
-; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefixes=CHECK,X64,X64-AVX512,X64-AVX512DQ,X64-AVX512-WIN,X64-AVX512DQ-WIN
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefixes=CHECK,X86,X64-AVX512,X64-AVX512DQ,X64-AVX512-LIN,X64-AVX512DQ-LIN
-; RUN: llc < %s -mtriple=i386-pc-windows-msvc -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X86,X86-AVX512,X86-AVX512F,X86-AVX512-WIN,X86-AVX512F-WIN
-; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X86,X86-AVX512,X86-AVX512F,X86-AVX512-LIN,X86-AVX512F-LIN
-; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X64,X64-AVX512,X64-AVX512F,X64-AVX512-WIN,X64-AVX512F-WIN
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,X86,X64-AVX512,X64-AVX512F,X64-AVX512-LIN,X64-AVX512F-LIN
-; RUN: llc < %s -mtriple=i386-pc-windows-msvc -mattr=+sse3 | FileCheck %s --check-prefixes=CHECK,X86,X86-SSE,X86-SSE3,X86-SSE-WIN,X86-SSE3-WIN
-; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mattr=+sse3 | FileCheck %s --check-prefixes=CHECK,X86,X86-SSE,X86-SSE3,X86-SSE-LIN,X86-SSE3-LIN
-; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc -mattr=+sse3 | FileCheck %s --check-prefixes=CHECK,X64,X64-SSE,X64-SSE3,X64-SSE-WIN,X64-SSE3-WIN
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse3 | FileCheck %s --check-prefixes=CHECK,X64,X64-SSE,X64-SSE3,X64-SSE-LIN,X64-SSE3-LIN
-; RUN: llc < %s -mtriple=i386-pc-windows-msvc -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X86,X86-SSE,X86-SSE2,X86-SSE-WIN,X86-SSE2-WIN
-; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X86,X86-SSE,X86-SSE2,X86-SSE-LIN,X86-SSE2-LIN
-; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64,X64-SSE,X64-SSE2,X64-SSE-WIN,X64-SSE2-WIN
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64,X64-SSE,X64-SSE2,X64-SSE-LIN,X64-SSE2-LIN
-; RUN: llc < %s -mtriple=i386-pc-windows-msvc -mattr=-sse | FileCheck %s --check-prefixes=CHECK,X86,X87,X87-WIN
-; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mattr=-sse | FileCheck %s --check-prefixes=CHECK,X86,X87,X87-LIN
+; RUN: llc < %s -mtriple=i386-pc-windows-msvc -mattr=+avx512f,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=X86-AVX512DQVL,X86-AVX512-WIN
+; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mattr=+avx512f,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=X86-AVX512DQVL,X86-AVX512-LIN
+; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc -mattr=+avx512f,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=X64-AVX512,X64-AVX512-WIN
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=X64-AVX512,X64-AVX512-LIN
+; RUN: llc < %s -mtriple=i386-pc-windows-msvc -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefixes=X86-AVX512DQ,X86-AVX512-WIN
+; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefixes=X86-AVX512DQ,X86-AVX512-LIN
+; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefixes=X64-AVX512,X64-AVX512-WIN
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefixes=X64-AVX512,X64-AVX512-LIN
+; RUN: llc < %s -mtriple=i386-pc-windows-msvc -mattr=+avx512f | FileCheck %s --check-prefixes=X86-AVX512-WIN,X86-AVX512F-WIN
+; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mattr=+avx512f | FileCheck %s --check-prefixes=X86-AVX512-LIN,X86-AVX512F-LIN
+; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc -mattr=+avx512f | FileCheck %s --check-prefixes=X64-AVX512,X64-AVX512-WIN
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f | FileCheck %s --check-prefixes=X64-AVX512,X64-AVX512-LIN
+; RUN: llc < %s -mtriple=i386-pc-windows-msvc -mattr=+sse3 | FileCheck %s --check-prefixes=X86-SSE-WIN,X86-SSE3-WIN
+; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mattr=+sse3 | FileCheck %s --check-prefixes=X86-SSE-LIN,X86-SSE3-LIN
+; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc -mattr=+sse3 | FileCheck %s --check-prefixes=X64-SSE,X64-SSE-WIN,X64-SSE3-WIN
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse3 | FileCheck %s --check-prefixes=X64-SSE,X64-SSE-LIN,X64-SSE3-LIN
+; RUN: llc < %s -mtriple=i386-pc-windows-msvc -mattr=+sse2 | FileCheck %s --check-prefixes=X86-SSE-WIN,X86-SSE2-WIN
+; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mattr=+sse2 | FileCheck %s --check-prefixes=X86-SSE-LIN,X86-SSE2-LIN
+; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc -mattr=+sse2 | FileCheck %s --check-prefixes=X64-SSE,X64-SSE-WIN,X64-SSE2-WIN
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2 | FileCheck %s --check-prefixes=X64-SSE,X64-SSE-LIN,X64-SSE2-LIN
+; RUN: llc < %s -mtriple=i386-pc-windows-msvc -mattr=-sse | FileCheck %s --check-prefix=X87-WIN
+; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mattr=-sse | FileCheck %s --check-prefix=X87-LIN
 ; Check that scalar FP conversions to signed and unsigned int64 are using
 ; reasonable sequences, across platforms and target switches.
@@ -1425,7 +1425,7 @@ ; X64-AVX512-LIN-LABEL: t_to_u64: ; X64-AVX512-LIN: # %bb.0: ; X64-AVX512-LIN-NEXT: pushq %rax -; X64-AVX512-LIN-NEXT: callq __fixunstfdi +; X64-AVX512-LIN-NEXT: callq __fixunstfdi@PLT ; X64-AVX512-LIN-NEXT: popq %rcx ; X64-AVX512-LIN-NEXT: retq ; @@ -1460,7 +1460,7 @@ ; X64-SSE-LIN-LABEL: t_to_u64: ; X64-SSE-LIN: # %bb.0: ; X64-SSE-LIN-NEXT: pushq %rax -; X64-SSE-LIN-NEXT: callq __fixunstfdi +; X64-SSE-LIN-NEXT: callq __fixunstfdi@PLT ; X64-SSE-LIN-NEXT: popq %rcx ; X64-SSE-LIN-NEXT: retq ; @@ -1517,7 +1517,7 @@ ; X64-AVX512-LIN-LABEL: t_to_s64: ; X64-AVX512-LIN: # %bb.0: ; X64-AVX512-LIN-NEXT: pushq %rax -; X64-AVX512-LIN-NEXT: callq __fixtfdi +; X64-AVX512-LIN-NEXT: callq __fixtfdi@PLT ; X64-AVX512-LIN-NEXT: popq %rcx ; X64-AVX512-LIN-NEXT: retq ; @@ -1552,7 +1552,7 @@ ; X64-SSE-LIN-LABEL: t_to_s64: ; X64-SSE-LIN: # %bb.0: ; X64-SSE-LIN-NEXT: pushq %rax -; X64-SSE-LIN-NEXT: callq __fixtfdi +; X64-SSE-LIN-NEXT: callq __fixtfdi@PLT ; X64-SSE-LIN-NEXT: popq %rcx ; X64-SSE-LIN-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/sse-intrinsics-x86.ll b/llvm/test/CodeGen/X86/sse-intrinsics-x86.ll --- a/llvm/test/CodeGen/X86/sse-intrinsics-x86.ll +++ b/llvm/test/CodeGen/X86/sse-intrinsics-x86.ll @@ -1,10 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+sse -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE -; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1 -; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512 -; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-sse2 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE -; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1 -; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512 +; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+sse -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,SSE,X86-SSE +; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,AVX,X86-AVX,AVX1 +; RUN: llc < %s -mtriple=i386-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,AVX,X86-AVX,AVX512 +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=-sse2 -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,SSE,X64-SSE +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,AVX,X64-AVX,AVX1 +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,AVX,X64-AVX,AVX512 define <4 x float> @test_x86_sse_cmp_ps(<4 x float> %a0, <4 x float> %a1) { ; SSE-LABEL: test_x86_sse_cmp_ps: diff --git a/llvm/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll --- a/llvm/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll +++ b/llvm/test/CodeGen/X86/sse41-intrinsics-fast-isel.ll @@ -1,10 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -fast-isel 
-mtriple=i386-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE -; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1 -; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512 -; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE -; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1 -; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512 +; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,X86-SSE +; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,X86-AVX,AVX1 +; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=AVX,X86-AVX,AVX512 +; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,X64-SSE +; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,X64-AVX,AVX1 +; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=AVX,X64-AVX,AVX512 ; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse41-builtins.c diff --git a/llvm/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll --- a/llvm/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll +++ b/llvm/test/CodeGen/X86/sse42-intrinsics-fast-isel.ll @@ -1,10 +1,10 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,X86,SSE,X86-SSE -; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX1,X86-AVX1 -; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X86,AVX,X86-AVX,AVX512,X86-AVX512 -; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,X64,SSE,X64-SSE -; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX1,X64-AVX1 -; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,X64,AVX,X64-AVX,AVX512,X64-AVX512 +; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=X86,SSE,X86-SSE +; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=X86,AVX,X86-AVX,AVX1 +; RUN: llc < %s -fast-isel -mtriple=i386-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=X86,AVX,X86-AVX,AVX512 +; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=X64,SSE,X64-SSE +; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown 
-mattr=+avx | FileCheck %s --check-prefixes=X64,AVX,X64-AVX,AVX1 +; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=X64,AVX,X64-AVX,AVX512 ; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse42-builtins.c diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll --- a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll +++ b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll @@ -3,17 +3,17 @@ ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLBW -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefixes=AVX512,AVX512VBMI2 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLVBMI2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512F +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefix=AVX512VL +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX512BW +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=AVX512VLBW +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefix=AVX512VBMI2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefix=AVX512VLVBMI2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=XOP,XOPAVX1 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=XOP,XOPAVX2 ; Just one 32-bit run to make sure we do reasonable things for i64 cases. 
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86-SSE2
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86-SSE2
 declare <2 x i64> @llvm.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
 declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
@@ -1,14 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLBW
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefixes=AVX512,AVX512VBMI2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLVBMI2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=XOPAVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=XOPAVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefix=AVX512VBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefix=AVX512VLVBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=XOPAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=XOPAVX2
 declare <4 x i64> @llvm.fshl.v4i64(<4 x i64>, <4 x i64>, <4 x i64>)
 declare <8 x i32> @llvm.fshl.v8i32(<8 x i32>, <8 x i32>, <8 x i32>)
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
@@ -1,19 +1,19 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512VL
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512BW
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512VLBW
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefixes=AVX512,AVX512VBMI2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLVBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefix=AVX512VBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefix=AVX512VLVBMI2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=XOP,XOPAVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=XOP,XOPAVX2
 ; Just one 32-bit run to make sure we do reasonable things for i64 cases.
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86-SSE2
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86-SSE2
 declare <2 x i32> @llvm.fshl.v2i32(<2 x i32>, <2 x i32>, <2 x i32>)
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
@@ -3,17 +3,17 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLBW
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefixes=AVX512,AVX512VBMI2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLVBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefix=AVX512VBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefix=AVX512VLVBMI2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=XOP,XOPAVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=XOP,XOPAVX2
 ; Just one 32-bit run to make sure we do reasonable things for i64 cases.
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86-SSE2
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86-SSE2
 declare <2 x i64> @llvm.fshr.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
 declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
@@ -1,14 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLBW
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefixes=AVX512,AVX512VBMI2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLVBMI2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=XOPAVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=XOPAVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefix=AVX512VBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefix=AVX512VLVBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=XOPAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=XOPAVX2
 declare <4 x i64> @llvm.fshr.v4i64(<4 x i64>, <4 x i64>, <4 x i64>)
 declare <8 x i32> @llvm.fshr.v8i32(<8 x i32>, <8 x i32>, <8 x i32>)
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
@@ -1,19 +1,19 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512VL
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512BW
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512VLBW
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefixes=AVX512,AVX512VBMI2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLVBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefix=AVX512VBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefix=AVX512VLVBMI2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=XOP,XOPAVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=XOP,XOPAVX2
 ; Just one 32-bit run to make sure we do reasonable things for i64 cases.
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86-SSE2
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86-SSE2
 declare <2 x i32> @llvm.fshr.v2i32(<2 x i32>, <2 x i32>, <2 x i32>)
diff --git a/llvm/test/CodeGen/X86/vector-rotate-256.ll b/llvm/test/CodeGen/X86/vector-rotate-256.ll
--- a/llvm/test/CodeGen/X86/vector-rotate-256.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-256.ll
@@ -1,14 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VL
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX512,AVX512BW
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLBW
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefixes=AVX512,AVX512VBMI2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLVBMI2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefixes=XOPAVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefixes=XOPAVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2 | FileCheck %s --check-prefix=AVX512VBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vbmi2,+avx512vl | FileCheck %s --check-prefix=AVX512VLVBMI2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=XOPAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=XOPAVX2
 ;
 ; Variable Rotates