Index: lib/Target/X86/X86InstrAVX512.td
===================================================================
--- lib/Target/X86/X86InstrAVX512.td
+++ lib/Target/X86/X86InstrAVX512.td
@@ -2113,15 +2113,15 @@
           (COPY_TO_REGCLASS VK8:$src, GR8)>;
 
 def : Pat<(i32 (zext (i16 (bitconvert (v16i1 VK16:$src))))),
-          (i32 (SUBREG_TO_REG (i64 0),
-                (i16 (COPY_TO_REGCLASS VK16:$src, GR16)), sub_16bit))>;
+          (KMOVWrk VK16:$src)>;
 def : Pat<(i32 (anyext (i16 (bitconvert (v16i1 VK16:$src))))),
           (i32 (SUBREG_TO_REG (i64 0),
                 (i16 (COPY_TO_REGCLASS VK16:$src, GR16)), sub_16bit))>;
 
 def : Pat<(i32 (zext (i8 (bitconvert (v8i1 VK8:$src))))),
-          (i32 (SUBREG_TO_REG (i64 0),
-                (i8 (COPY_TO_REGCLASS VK8:$src, GR8)), sub_8bit))>;
+          (MOVZX32rr8 (COPY_TO_REGCLASS VK8:$src, GR8))>, Requires<[NoDQI]>;
+def : Pat<(i32 (zext (i8 (bitconvert (v8i1 VK8:$src))))),
+          (KMOVBrk VK8:$src)>, Requires<[HasDQI]>;
 def : Pat<(i32 (anyext (i8 (bitconvert (v8i1 VK8:$src))))),
           (i32 (SUBREG_TO_REG (i64 0),
                 (i8 (COPY_TO_REGCLASS VK8:$src, GR8)), sub_8bit))>;
Index: test/CodeGen/X86/avx512-mask-op.ll
===================================================================
--- test/CodeGen/X86/avx512-mask-op.ll
+++ test/CodeGen/X86/avx512-mask-op.ll
@@ -55,6 +55,7 @@
 ; KNL-NEXT:    kmovw %edi, %k0
 ; KNL-NEXT:    knotw %k0, %k0
 ; KNL-NEXT:    kmovw %k0, %eax
+; KNL-NEXT:    movzbl %al, %eax
 ; KNL-NEXT:    retq
 ;
 ; SKX-LABEL: mask8_zext:
@@ -1959,3 +1960,43 @@
   store <64 x i1> %v, <64 x i1>* %a
   ret void
 }
+
+define i32 @test_bitcast_v8i1_zext(<16 x i32> %a) {
+; KNL-LABEL: test_bitcast_v8i1_zext:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; KNL-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
+; KNL-NEXT:    kmovw %k0, %eax
+; KNL-NEXT:    movzbl %al, %eax
+; KNL-NEXT:    addl %eax, %eax
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: test_bitcast_v8i1_zext:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; SKX-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
+; SKX-NEXT:    kmovb %k0, %eax
+; SKX-NEXT:    addl %eax, %eax
+; SKX-NEXT:    retq
+  %v1 = icmp eq <16 x i32> %a, zeroinitializer
+  %mask = shufflevector <16 x i1> %v1, <16 x i1> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %mask1 = bitcast <8 x i1> %mask to i8
+  %val = zext i8 %mask1 to i32
+  %val1 = add i32 %val, %val
+  ret i32 %val1
+}
+
+define i32 @test_bitcast_v16i1_zext(<16 x i32> %a) {
+; CHECK-LABEL: test_bitcast_v16i1_zext:
+; CHECK:       ## BB#0:
+; CHECK-NEXT:    vpxord %zmm1, %zmm1, %zmm1
+; CHECK-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
+; CHECK-NEXT:    kmovw %k0, %eax
+; CHECK-NEXT:    addl %eax, %eax
+; CHECK-NEXT:    retq
+  %v1 = icmp eq <16 x i32> %a, zeroinitializer
+  %mask1 = bitcast <16 x i1> %v1 to i16
+  %val = zext i16 %mask1 to i32
+  %val1 = add i32 %val, %val
+  ret i32 %val1
+}
Index: test/CodeGen/X86/avx512-mask-zext-bugfix.ll
===================================================================
--- test/CodeGen/X86/avx512-mask-zext-bugfix.ll
+++ test/CodeGen/X86/avx512-mask-zext-bugfix.ll
@@ -0,0 +1,49 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -O0 -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s
+
+; ModuleID = 'mask_set.c'
+source_filename = "mask_set.c"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
+
+; Function Attrs: nounwind uwtable
+declare i64 @calc_expected_mask_val(i8* %valp, i32 %el_size, i32 %length)
+
+; Function Attrs: nounwind uwtable
+declare i32 @check_mask16(i16 zeroext %res_mask, i16 zeroext %exp_mask, i8* %fname, i8* %input)
+
+; Function Attrs: nounwind uwtable
+define void @test_xmm(i32 %shift, i32 %mulp, <2 x i64> %a, i8* %arraydecay, i8* %fname) {
+; CHECK-LABEL: test_xmm:
+; CHECK:       ## BB#0:
+; CHECK:         callq _calc_expected_mask_val
+; CHECK-NEXT:    movl %eax, %edx
+; CHECK-NEXT:    movw %dx, %r9w
+; CHECK-NEXT:    movzwl %r9w, %esi
+; CHECK-NEXT:    kmovw {{[0-9]+}}(%rsp), %k0 ## 2-byte Reload
+; CHECK-NEXT:    kmovb %k0, %edi
+; CHECK-NEXT:    movq {{[0-9]+}}(%rsp), %rdx ## 8-byte Reload
+; CHECK-NEXT:    movq {{[0-9]+}}(%rsp), %rcx ## 8-byte Reload
+; CHECK-NEXT:    callq _check_mask16
+  %d2 = bitcast <2 x i64> %a to <8 x i16>
+  %m2 = call i8 @llvm.x86.avx512.cvtw2mask.128(<8 x i16> %d2)
+  %conv7 = zext i8 %m2 to i16
+  %call9 = call i64 @calc_expected_mask_val(i8* %arraydecay, i32 2, i32 8)
+  %conv10 = trunc i64 %call9 to i16
+  %call12 = call i32 @check_mask16(i16 zeroext %conv7, i16 zeroext %conv10, i8* %fname, i8* %arraydecay)
+  %d3 = bitcast <2 x i64> %a to <4 x i32>
+  %m3 = call i8 @llvm.x86.avx512.cvtd2mask.128(<4 x i32> %d3)
+  %conv14 = zext i8 %m3 to i16
+  %call16 = call i64 @calc_expected_mask_val(i8* %arraydecay, i32 4, i32 4)
+  %conv17 = trunc i64 %call16 to i16
+  %call19 = call i32 @check_mask16(i16 zeroext %conv14, i16 zeroext %conv17, i8* %fname, i8* %arraydecay)
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i8 @llvm.x86.avx512.cvtw2mask.128(<8 x i16>)
+
+; Function Attrs: nounwind readnone
+declare i8 @llvm.x86.avx512.cvtd2mask.128(<4 x i32>)
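
A note on why the v8i1 case above needs two patterns (a sketch for context, not part of the patch): KMOVBrk is only available with AVX512DQ, so on a non-DQ target such as KNL the mask is copied to a GR8 and the upper 24 bits of the i32 result must then be cleared explicitly, which is what the new MOVZX32rr8 pattern and the added `movzbl %al, %eax` check express. A minimal reproducer of this path, with an illustrative function name, would be:

; Hypothetical reproducer; expected selection mirrors the CHECK lines above.
; With AVX512DQ (SKX):  kmovb %k0, %eax   -- zeroes bits 31:8 directly
; Without it (KNL):     kmovw %k0, %eax   -- followed by movzbl %al, %eax
define i32 @v8i1_zext_sketch(<8 x i64> %x) {
  %c = icmp eq <8 x i64> %x, zeroinitializer   ; v8i1 mask in a k-register
  %m = bitcast <8 x i1> %c to i8               ; reinterpret the 8 mask bits
  %z = zext i8 %m to i32                       ; bits 31:8 must end up zero
  ret i32 %z
}

The underlying bug is that the old SUBREG_TO_REG lowering merely asserted that the upper bits of the wider register were zero without emitting any instruction to clear them, so the zero-extended result could contain garbage; KMOVWrk and KMOVBrk zero the upper bits of the destination GPR architecturally.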