Index: test/CodeGen/X86/avx2-intrinsics-x86.ll
===================================================================
--- test/CodeGen/X86/avx2-intrinsics-x86.ll
+++ test/CodeGen/X86/avx2-intrinsics-x86.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=avx2 -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2
-; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=skx -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq -show-mc-encoding | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL
 
 define <16 x i16> @test_x86_avx2_packssdw(<8 x i32> %a0, <8 x i32> %a1) {
 ; AVX2-LABEL: test_x86_avx2_packssdw:
@@ -1619,10 +1619,10 @@
 ;
 ; AVX512VL-LABEL: test_gather_mask:
 ; AVX512VL:       ## BB#0:
-; AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
-; AVX512VL-NEXT:    vmovaps %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
-; AVX512VL-NEXT:    vgatherdps %ymm3, (%eax,%ymm1,4), %ymm0 ## encoding: [0xc4,0xe2,0x65,0x92,0x04,0x88]
 ; AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x08]
+; AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %ecx ## encoding: [0x8b,0x4c,0x24,0x04]
+; AVX512VL-NEXT:    vmovaps %ymm2, %ymm3 ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
+; AVX512VL-NEXT:    vgatherdps %ymm3, (%ecx,%ymm1,4), %ymm0 ## encoding: [0xc4,0xe2,0x65,0x92,0x04,0x89]
 ; AVX512VL-NEXT:    vmovups %ymm2, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x10]
 ; AVX512VL-NEXT:    retl ## encoding: [0xc3]
   %a_i8 = bitcast float* %a to i8*
Index: test/CodeGen/X86/avx512-cmp.ll
===================================================================
--- test/CodeGen/X86/avx512-cmp.ll
+++ test/CodeGen/X86/avx512-cmp.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s --check-prefix=ALL --check-prefix=KNL
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s --check-prefix=ALL --check-prefix=SKX
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=KNL
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=SKX
 
 define double @test1(double %a, double %b) nounwind {
 ; ALL-LABEL: test1:
@@ -126,11 +126,11 @@
 define i32 @test8(i32 %a1, i32 %a2, i32 %a3) {
 ; ALL-LABEL: test8:
 ; ALL:       ## BB#0:
+; ALL-NEXT:    notl %edi
 ; ALL-NEXT:    xorl $-2147483648, %esi ## imm = 0x80000000
 ; ALL-NEXT:    testl %edx, %edx
 ; ALL-NEXT:    movl $1, %eax
 ; ALL-NEXT:    cmovel %eax, %edx
-; ALL-NEXT:    notl %edi
 ; ALL-NEXT:    orl %edi, %esi
 ; ALL-NEXT:    cmovnel %edx, %eax
 ; ALL-NEXT:    retq
Index: test/CodeGen/X86/pr32329.ll
===================================================================
--- test/CodeGen/X86/pr32329.ll
+++ test/CodeGen/X86/pr32329.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mcpu=skx | FileCheck %s -check-prefix=X86
-; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=skx | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq | FileCheck %s -check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq | FileCheck %s -check-prefix=X64
+; This test checks for the DAG ISel failure on the SKX target reported in https://bugs.llvm.org/show_bug.cgi?id=32329
 
 %struct.AA = type { i24, [4 x i8] }
 
@@ -36,33 +37,33 @@
 ; X86-NEXT:    .cfi_offset %ebx, -12
 ; X86-NEXT:  .Lcfi7:
 ; X86-NEXT:    .cfi_offset %ebp, -8
+; X86-NEXT:    movl obj, %edx
 ; X86-NEXT:    movsbl var_27, %eax
+; X86-NEXT:    movzwl var_2, %esi
 ; X86-NEXT:    movl var_310, %ecx
 ; X86-NEXT:    imull %eax, %ecx
-; X86-NEXT:    movl obj, %esi
 ; X86-NEXT:    addl var_24, %ecx
-; X86-NEXT:    movzwl var_2, %edi
-; X86-NEXT:    andl $4194303, %esi # imm = 0x3FFFFF
-; X86-NEXT:    leal (%esi,%esi), %edx
-; X86-NEXT:    subl %eax, %edx
-; X86-NEXT:    movl %edx, %ebx
-; X86-NEXT:    subl %edi, %ebx
-; X86-NEXT:    imull %ebx, %ecx
+; X86-NEXT:    andl $4194303, %edx # imm = 0x3FFFFF
+; X86-NEXT:    leal (%edx,%edx), %ebx
+; X86-NEXT:    subl %eax, %ebx
+; X86-NEXT:    movl %ebx, %edi
+; X86-NEXT:    subl %esi, %edi
+; X86-NEXT:    imull %edi, %ecx
 ; X86-NEXT:    addl $-1437483407, %ecx # imm = 0xAA51BE71
-; X86-NEXT:    movl $9, %edi
+; X86-NEXT:    movl $9, %esi
 ; X86-NEXT:    xorl %ebp, %ebp
-; X86-NEXT:    shldl %cl, %edi, %ebp
-; X86-NEXT:    shlxl %ecx, %edi, %edi
+; X86-NEXT:    shldl %cl, %esi, %ebp
+; X86-NEXT:    shll %cl, %esi
 ; X86-NEXT:    testb $32, %cl
-; X86-NEXT:    cmovnel %edi, %ebp
+; X86-NEXT:    cmovnel %esi, %ebp
 ; X86-NEXT:    movl $0, %ecx
-; X86-NEXT:    cmovnel %ecx, %edi
+; X86-NEXT:    cmovnel %ecx, %esi
+; X86-NEXT:    cmpl %edx, %edi
 ; X86-NEXT:    movl %ebp, var_50+4
-; X86-NEXT:    cmpl %esi, %ebx
+; X86-NEXT:    movl %esi, var_50
 ; X86-NEXT:    setge var_205
-; X86-NEXT:    imull %eax, %edx
-; X86-NEXT:    movl %edi, var_50
-; X86-NEXT:    movb %dl, var_218
+; X86-NEXT:    imull %eax, %ebx
+; X86-NEXT:    movb %bl, var_218
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
 ; X86-NEXT:    popl %ebx
@@ -71,25 +72,26 @@
 ;
 ; X64-LABEL: foo:
 ; X64:       # BB#0: # %entry
-; X64-NEXT:    movsbl {{.*}}(%rip), %eax
+; X64-NEXT:    movl {{.*}}(%rip), %eax
+; X64-NEXT:    movsbl {{.*}}(%rip), %r9d
+; X64-NEXT:    movzwl {{.*}}(%rip), %r8d
 ; X64-NEXT:    movl {{.*}}(%rip), %ecx
-; X64-NEXT:    imull %eax, %ecx
-; X64-NEXT:    movl {{.*}}(%rip), %edx
+; X64-NEXT:    imull %r9d, %ecx
 ; X64-NEXT:    addl {{.*}}(%rip), %ecx
-; X64-NEXT:    movzwl {{.*}}(%rip), %r8d
-; X64-NEXT:    andl $4194303, %edx # imm = 0x3FFFFF
-; X64-NEXT:    leal (%rdx,%rdx), %edi
-; X64-NEXT:    subl %eax, %edi
+; X64-NEXT:    andl $4194303, %eax # imm = 0x3FFFFF
+; X64-NEXT:    leal (%rax,%rax), %edi
+; X64-NEXT:    subl %r9d, %edi
 ; X64-NEXT:    movl %edi, %esi
 ; X64-NEXT:    subl %r8d, %esi
 ; X64-NEXT:    imull %esi, %ecx
 ; X64-NEXT:    addl $-1437483407, %ecx # imm = 0xAA51BE71
-; X64-NEXT:    movl $9, %r8d
-; X64-NEXT:    cmpl %edx, %esi
+; X64-NEXT:    movl $9, %edx
+; X64-NEXT:    # kill: %CL<def> %CL<kill> %ECX<kill>
+; X64-NEXT:    shlq %cl, %rdx
+; X64-NEXT:    movq %rdx, {{.*}}(%rip)
+; X64-NEXT:    cmpl %eax, %esi
 ; X64-NEXT:    setge {{.*}}(%rip)
-; X64-NEXT:    shlxq %rcx, %r8, %rcx
-; X64-NEXT:    imull %eax, %edi
-; X64-NEXT:    movq %rcx, {{.*}}(%rip)
+; X64-NEXT:    imull %r9d, %edi
 ; X64-NEXT:    movb %dil, {{.*}}(%rip)
 ; X64-NEXT:    retq
 entry:
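
Note on regeneration: each of the three tests carries the "Assertions have been
autogenerated by utils/update_llc_test_checks.py" header, so after a RUN-line
change like this one the CHECK/NEXT blocks are typically refreshed by rerunning
that script rather than edited by hand. A minimal sketch of that step, assuming
an in-tree LLVM checkout with a freshly built llc on PATH (the file list simply
mirrors this patch; the exact invocation is illustrative, not part of the patch):

  utils/update_llc_test_checks.py test/CodeGen/X86/avx2-intrinsics-x86.ll \
      test/CodeGen/X86/avx512-cmp.ll test/CodeGen/X86/pr32329.ll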