Index: lib/Target/X86/X86InstrAVX512.td
===================================================================
--- lib/Target/X86/X86InstrAVX512.td
+++ lib/Target/X86/X86InstrAVX512.td
@@ -2113,15 +2113,15 @@
            (COPY_TO_REGCLASS VK8:$src, GR8)>;
 def : Pat<(i32 (zext (i16 (bitconvert (v16i1 VK16:$src))))),
-          (i32 (SUBREG_TO_REG (i64 0),
-                (i16 (COPY_TO_REGCLASS VK16:$src, GR16)), sub_16bit))>;
+          (KMOVWrk VK16:$src)>;
 def : Pat<(i32 (anyext (i16 (bitconvert (v16i1 VK16:$src))))),
           (i32 (SUBREG_TO_REG (i64 0),
                 (i16 (COPY_TO_REGCLASS VK16:$src, GR16)), sub_16bit))>;
 def : Pat<(i32 (zext (i8 (bitconvert (v8i1 VK8:$src))))),
-          (i32 (SUBREG_TO_REG (i64 0),
-                (i8 (COPY_TO_REGCLASS VK8:$src, GR8)), sub_8bit))>;
+          (MOVZX32rr8 (COPY_TO_REGCLASS VK8:$src, GR8))>, Requires<[NoDQI]>;
+def : Pat<(i32 (zext (i8 (bitconvert (v8i1 VK8:$src))))),
+          (KMOVBrk VK8:$src)>, Requires<[HasDQI]>;
 def : Pat<(i32 (anyext (i8 (bitconvert (v8i1 VK8:$src))))),
           (i32 (SUBREG_TO_REG (i64 0),
                 (i8 (COPY_TO_REGCLASS VK8:$src, GR8)), sub_8bit))>;
Index: test/CodeGen/X86/avx512-mask-op.ll
===================================================================
--- test/CodeGen/X86/avx512-mask-op.ll
+++ test/CodeGen/X86/avx512-mask-op.ll
@@ -55,6 +55,7 @@
 ; KNL-NEXT: kmovw %edi, %k0
 ; KNL-NEXT: knotw %k0, %k0
 ; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: movzbl %al, %eax
 ; KNL-NEXT: retq
 ;
 ; SKX-LABEL: mask8_zext:
@@ -1959,3 +1960,69 @@
   store <64 x i1> %v, <64 x i1>* %a
   ret void
 }
+
+define i16 @test_bitcast_zext_v8i1(i16 %v) {
+; KNL-LABEL: test_bitcast_zext_v8i1:
+; KNL: ## BB#0:
+; KNL-NEXT: kmovw %edi, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: addl %eax, %eax
+; KNL-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_bitcast_zext_v8i1:
+; SKX: ## BB#0:
+; SKX-NEXT: kmovw %edi, %k0
+; SKX-NEXT: kmovb %k0, %eax
+; SKX-NEXT: addl %eax, %eax
+; SKX-NEXT: ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SKX-NEXT: retq
+  %v1 = bitcast i16 %v to <16 x i1>
+  %mask = shufflevector <16 x i1> %v1, <16 x i1> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %mask1 = bitcast <8 x i1> %mask to i8
+  %val = zext i8 %mask1 to i16
+  %val1 = add i16 %val, %val
+  ret i16 %val1
+}
+
+define i32 @test_bitcast_zext_v16i1(i32 %v) {
+; KNL-LABEL: test_bitcast_zext_v16i1:
+; KNL: ## BB#0:
+; KNL-NEXT: pushq %rbp
+; KNL-NEXT: Ltmp21:
+; KNL-NEXT: .cfi_def_cfa_offset 16
+; KNL-NEXT: Ltmp22:
+; KNL-NEXT: .cfi_offset %rbp, -16
+; KNL-NEXT: movq %rsp, %rbp
+; KNL-NEXT: Ltmp23:
+; KNL-NEXT: .cfi_def_cfa_register %rbp
+; KNL-NEXT: andq $-32, %rsp
+; KNL-NEXT: subq $32, %rsp
+; KNL-NEXT: movl %edi, (%rsp)
+; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
+; KNL-NEXT: kmovw (%rsp), %k1
+; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
+; KNL-NEXT: vpmovdb %zmm0, %xmm0
+; KNL-NEXT: vpmovsxbd %xmm0, %zmm0
+; KNL-NEXT: vpslld $31, %zmm0, %zmm0
+; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
+; KNL-NEXT: kmovw %k0, %eax
+; KNL-NEXT: addl %eax, %eax
+; KNL-NEXT: movq %rbp, %rsp
+; KNL-NEXT: popq %rbp
+; KNL-NEXT: retq
+;
+; SKX-LABEL: test_bitcast_zext_v16i1:
+; SKX: ## BB#0:
+; SKX-NEXT: kmovd %edi, %k0
+; SKX-NEXT: kmovw %k0, %eax
+; SKX-NEXT: addl %eax, %eax
+; SKX-NEXT: retq
+  %v1 = bitcast i32 %v to <32 x i1>
+  %mask = shufflevector <32 x i1> %v1, <32 x i1> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %mask1 = bitcast <16 x i1> %mask to i16
+  %val = zext i16 %mask1 to i32
+  %val1 = add i32 %val, %val
+  ret i32 %val1
+}
Index: test/CodeGen/X86/avx512-mask-zext-bugfix.ll
===================================================================
--- test/CodeGen/X86/avx512-mask-zext-bugfix.ll
+++ test/CodeGen/X86/avx512-mask-zext-bugfix.ll
@@ -0,0 +1,97 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -O0 -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s
+
+; ModuleID = 'mask_set.c'
+source_filename = "mask_set.c"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
+
+; Function Attrs: nounwind uwtable
+declare i64 @calc_expected_mask_val(i8* %valp, i32 %el_size, i32 %length)
+; Function Attrs: nounwind uwtable
+declare i32 @check_mask16(i16 zeroext %res_mask, i16 zeroext %exp_mask, i8* %fname, i8* %input)
+
+; Function Attrs: nounwind readonly
+declare i32 @memcmp(i8*, i8*, i64)
+
+; Function Attrs: nounwind uwtable
+define void @test_xmm(i32 %shift, i32 %mulp, <2 x i64> %a,i8* %arraydecay,i8* %fname){
+; CHECK-LABEL: test_xmm:
+; CHECK: ## BB#0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: Ltmp0:
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: subq $80, %rsp
+; CHECK-NEXT: Ltmp1:
+; CHECK-NEXT: .cfi_def_cfa_offset 96
+; CHECK-NEXT: Ltmp2:
+; CHECK-NEXT: .cfi_offset %rbx, -16
+; CHECK-NEXT: movl $4, %eax
+; CHECK-NEXT: vpmovw2m %xmm0, %k0
+; CHECK-NEXT: movl $2, %esi
+; CHECK-NEXT: movl $8, %edi
+; CHECK-NEXT: movl %edi, {{[0-9]+}}(%rsp) ## 4-byte Spill
+; CHECK-NEXT: movq %rdx, %rdi
+; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %r8d ## 4-byte Reload
+; CHECK-NEXT: movq %rdx, {{[0-9]+}}(%rsp) ## 8-byte Spill
+; CHECK-NEXT: movl %r8d, %edx
+; CHECK-NEXT: movq %rcx, {{[0-9]+}}(%rsp) ## 8-byte Spill
+; CHECK-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) ## 16-byte Spill
+; CHECK-NEXT: movl %eax, {{[0-9]+}}(%rsp) ## 4-byte Spill
+; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
+; CHECK-NEXT: callq _calc_expected_mask_val
+; CHECK-NEXT: movl %eax, %edx
+; CHECK-NEXT: movw %dx, %r9w
+; CHECK-NEXT: movzwl %r9w, %esi
+; CHECK-NEXT: kmovw {{[0-9]+}}(%rsp), %k0 ## 2-byte Reload
+; CHECK-NEXT: kmovb %k0, %edi
+; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rdx ## 8-byte Reload
+; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rcx ## 8-byte Reload
+; CHECK-NEXT: callq _check_mask16
+; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 ## 16-byte Reload
+; CHECK-NEXT: vpmovd2m %xmm0, %k0
+; CHECK-NEXT: kmovb %k0, %r10d
+; CHECK-NEXT: movb %r10b, %r11b
+; CHECK-NEXT: movzbl %r10b, %esi
+; CHECK-NEXT: movw %si, %r9w
+; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rdi ## 8-byte Reload
+; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %esi ## 4-byte Reload
+; CHECK-NEXT: movl {{[0-9]+}}(%rsp), %edx ## 4-byte Reload
+; CHECK-NEXT: movl %eax, {{[0-9]+}}(%rsp) ## 4-byte Spill
+; CHECK-NEXT: movb %r11b, {{[0-9]+}}(%rsp) ## 1-byte Spill
+; CHECK-NEXT: movw %r9w, {{[0-9]+}}(%rsp) ## 2-byte Spill
+; CHECK-NEXT: callq _calc_expected_mask_val
+; CHECK-NEXT: movw %ax, %r9w
+; CHECK-NEXT: movw {{[0-9]+}}(%rsp), %bx ## 2-byte Reload
+; CHECK-NEXT: movzwl %bx, %edi
+; CHECK-NEXT: movzwl %r9w, %esi
+; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rdx ## 8-byte Reload
+; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rcx ## 8-byte Reload
+; CHECK-NEXT: callq _check_mask16
+; CHECK-NEXT: movl %eax, {{[0-9]+}}(%rsp) ## 4-byte Spill
+; CHECK-NEXT: addq $80, %rsp
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
+  %d2 = bitcast <2 x i64> %a to <8 x i16>
+  %m2 = call i8 @llvm.x86.avx512.cvtw2mask.128(<8 x i16> %d2)
+  %conv7 = zext i8 %m2 to i16
+  %call9 = call i64 @calc_expected_mask_val(i8* %arraydecay, i32 2, i32 8)
+  %conv10 = trunc i64 %call9 to i16
+  %call12 = call i32 @check_mask16(i16 zeroext %conv7, i16 zeroext %conv10, i8* %fname, i8* %arraydecay)
+  %d3 = bitcast <2 x i64> %a to <4 x i32>
+  %m3 = call i8 @llvm.x86.avx512.cvtd2mask.128(<4 x i32> %d3)
+  %conv14 = zext i8 %m3 to i16
+  %call16 = call i64 @calc_expected_mask_val(i8* %arraydecay, i32 4, i32 4)
+  %conv17 = trunc i64 %call16 to i16
+  %call19 = call i32 @check_mask16(i16 zeroext %conv14, i16 zeroext %conv17, i8* %fname, i8* %arraydecay)
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i8 @llvm.x86.avx512.cvtw2mask.128(<8 x i16>)
+
+; Function Attrs: nounwind readnone
+declare i8 @llvm.x86.avx512.cvtd2mask.128(<4 x i32>)
+