diff --git a/llvm/lib/Target/X86/X86.td b/llvm/lib/Target/X86/X86.td
--- a/llvm/lib/Target/X86/X86.td
+++ b/llvm/lib/Target/X86/X86.td
@@ -386,6 +386,10 @@
     : SubtargetFeature<"prefer-256-bit", "Prefer256Bit", "true",
                        "Prefer 256-bit AVX instructions">;
 
+def FeaturePreferMaskRegisters
+    : SubtargetFeature<"prefer-mask-registers", "PreferMaskRegisters", "true",
+                       "Prefer AVX512 mask registers over PTEST/MOVMSK">;
+
 // Lower indirect calls using a special construct called a `retpoline` to
 // mitigate potential Spectre v2 attacks against them.
 def FeatureRetpolineIndirectCalls
@@ -801,6 +805,7 @@
       FeatureBMI2,
       FeatureFMA,
       FeaturePRFCHW,
+      FeaturePreferMaskRegisters,
       FeatureSlowTwoMemOps,
       FeatureFastPartialYMMorZMMWrite,
       FeatureHasFastGather,
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -42574,23 +42574,38 @@
   SDLoc DL(SetCC);
   bool HasAVX = Subtarget.hasAVX();
 
-  // Use XOR (plus OR) and PTEST after SSE4.1 and before AVX512.
+  // Use XOR (plus OR) and PTEST after SSE4.1 for 128/256-bit operands.
+  // Use PCMPNEQ (plus OR) and KORTEST for 512-bit operands.
   // Otherwise use PCMPEQ (plus AND) and mask testing.
   if ((OpSize == 128 && Subtarget.hasSSE2()) ||
       (OpSize == 256 && HasAVX) ||
       (OpSize == 512 && Subtarget.useAVX512Regs())) {
     bool HasPT = Subtarget.hasSSE41();
+
+    // PTEST and MOVMSK are slow on Knights Landing and Knights Mill and
+    // widened vector registers are essentially free. (Technically, widening
+    // registers prevents load folding, but the tradeoff is worth it.)
+    bool PreferKOT = Subtarget.preferMaskRegisters();
+    bool NeedZExt = PreferKOT && !Subtarget.hasVLX() && OpSize != 512;
+
     EVT VecVT = MVT::v16i8;
-    EVT CmpVT = MVT::v16i8;
-    if (OpSize == 256)
-      VecVT = CmpVT = MVT::v32i8;
-    if (OpSize == 512) {
+    EVT CmpVT = PreferKOT ? MVT::v16i1 : VecVT;
+    if (OpSize == 256) {
+      VecVT = MVT::v32i8;
+      CmpVT = PreferKOT ? MVT::v32i1 : VecVT;
+    }
+    EVT CastVT = VecVT;
+    if (OpSize == 512 || NeedZExt) {
       if (Subtarget.hasBWI()) {
         VecVT = MVT::v64i8;
         CmpVT = MVT::v64i1;
+        if (OpSize == 512)
+          CastVT = VecVT;
       } else {
         VecVT = MVT::v16i32;
         CmpVT = MVT::v16i1;
+        CastVT = OpSize == 512 ? VecVT :
+                 OpSize == 256 ? MVT::v8i32 : MVT::v4i32;
       }
     }
 
@@ -42600,11 +42615,27 @@
       // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
       // Use 2 vector equality compares and 'and' the results before doing a
      // MOVMSK.
-      SDValue A = DAG.getBitcast(VecVT, X.getOperand(0).getOperand(0));
-      SDValue B = DAG.getBitcast(VecVT, X.getOperand(0).getOperand(1));
-      SDValue C = DAG.getBitcast(VecVT, X.getOperand(1).getOperand(0));
-      SDValue D = DAG.getBitcast(VecVT, X.getOperand(1).getOperand(1));
-      if (VecVT == CmpVT && HasPT) {
+      SDValue A = DAG.getBitcast(CastVT, X.getOperand(0).getOperand(0));
+      SDValue B = DAG.getBitcast(CastVT, X.getOperand(0).getOperand(1));
+      SDValue C = DAG.getBitcast(CastVT, X.getOperand(1).getOperand(0));
+      SDValue D = DAG.getBitcast(CastVT, X.getOperand(1).getOperand(1));
+      if (NeedZExt) {
+        if (OpSize == 128) {
+          A = DAG.WidenVector(A, DL);
+          B = DAG.WidenVector(B, DL);
+          C = DAG.WidenVector(C, DL);
+          D = DAG.WidenVector(D, DL);
+        }
+        A = DAG.WidenVector(A, DL);
+        B = DAG.WidenVector(B, DL);
+        C = DAG.WidenVector(C, DL);
+        D = DAG.WidenVector(D, DL);
+      }
+      if (VecVT != CmpVT) {
+        SDValue Cmp1 = DAG.getSetCC(DL, CmpVT, A, B, ISD::SETNE);
+        SDValue Cmp2 = DAG.getSetCC(DL, CmpVT, C, D, ISD::SETNE);
+        Cmp = DAG.getNode(ISD::OR, DL, CmpVT, Cmp1, Cmp2);
+      } else if (HasPT) {
         SDValue Cmp1 = DAG.getNode(ISD::XOR, DL, VecVT, A, B);
         SDValue Cmp2 = DAG.getNode(ISD::XOR, DL, VecVT, C, D);
         Cmp = DAG.getNode(ISD::OR, DL, VecVT, Cmp1, Cmp2);
@@ -42614,19 +42645,30 @@
         Cmp = DAG.getNode(ISD::AND, DL, CmpVT, Cmp1, Cmp2);
       }
     } else {
-      SDValue VecX = DAG.getBitcast(VecVT, X);
-      SDValue VecY = DAG.getBitcast(VecVT, Y);
-      if (VecVT == CmpVT && HasPT) {
+      SDValue VecX = DAG.getBitcast(CastVT, X);
+      SDValue VecY = DAG.getBitcast(CastVT, Y);
+      if (NeedZExt) {
+        if (OpSize == 128) {
+          VecX = DAG.WidenVector(VecX, DL);
+          VecY = DAG.WidenVector(VecY, DL);
+        }
+        VecX = DAG.WidenVector(VecX, DL);
+        VecY = DAG.WidenVector(VecY, DL);
+      }
+      if (VecVT != CmpVT) {
+        Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETNE);
+      } else if (HasPT) {
         Cmp = DAG.getNode(ISD::XOR, DL, VecVT, VecX, VecY);
       } else {
         Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ);
       }
     }
-    // For 512-bits we want to emit a setcc that will lower to kortest.
+    // AVX512 should emit a setcc that will lower to kortest.
     if (VecVT != CmpVT) {
-      EVT KRegVT = CmpVT == MVT::v64i1 ? MVT::i64 : MVT::i16;
-      SDValue Mask = DAG.getAllOnesConstant(DL, KRegVT);
-      return DAG.getSetCC(DL, VT, DAG.getBitcast(KRegVT, Cmp), Mask, CC);
+      EVT KRegVT = CmpVT == MVT::v64i1 ? MVT::i64 :
+                   CmpVT == MVT::v32i1 ? MVT::i32 : MVT::i16;
+      return DAG.getSetCC(DL, VT, DAG.getBitcast(KRegVT, Cmp),
+                          DAG.getConstant(0, DL, KRegVT), CC);
     }
     if (HasPT) {
       SDValue BCCmp = DAG.getBitcast(OpSize == 256 ? MVT::v4i64 : MVT::v2i64,
diff --git a/llvm/lib/Target/X86/X86Subtarget.h b/llvm/lib/Target/X86/X86Subtarget.h
--- a/llvm/lib/Target/X86/X86Subtarget.h
+++ b/llvm/lib/Target/X86/X86Subtarget.h
@@ -445,6 +445,9 @@
   /// Indicates target prefers 256 bit instructions.
   bool Prefer256Bit = false;
 
+  /// Indicates target prefers AVX512 mask registers.
+  bool PreferMaskRegisters = false;
+
   /// Threeway branch is profitable in this subtarget.
   bool ThreewayBranchProfitable = false;
 
@@ -706,6 +709,7 @@
     return UseRetpolineIndirectBranches;
   }
   bool useRetpolineExternalThunk() const { return UseRetpolineExternalThunk; }
+  bool preferMaskRegisters() const { return PreferMaskRegisters; }
   unsigned getPreferVectorWidth() const { return PreferVectorWidth; }
   unsigned getRequiredVectorWidth() const { return RequiredVectorWidth; }
 
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.h b/llvm/lib/Target/X86/X86TargetTransformInfo.h
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.h
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.h
@@ -85,6 +85,7 @@
       // Based on whether user set the -mprefer-vector-width command line.
       X86::FeaturePrefer128Bit,
       X86::FeaturePrefer256Bit,
+      X86::FeaturePreferMaskRegisters,
 
       // CPU name enums. These just follow CPU string.
       X86::ProcIntelAtom,
diff --git a/llvm/test/CodeGen/X86/memcmp.ll b/llvm/test/CodeGen/X86/memcmp.ll
--- a/llvm/test/CodeGen/X86/memcmp.ll
+++ b/llvm/test/CodeGen/X86/memcmp.ll
@@ -6,6 +6,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX --check-prefix=X64-AVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX --check-prefix=X64-AVX2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f,prefer-mask-registers | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX512Fk
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512bw | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX512BW
 
 ; This tests codegen time inlining/optimization of memcmp
@@ -1007,6 +1008,32 @@
 ; X64-AVX-NEXT: vptest %xmm0, %xmm0
 ; X64-AVX-NEXT: setne %al
 ; X64-AVX-NEXT: retq
+;
+; X64-AVX512F-LABEL: length16_eq:
+; X64-AVX512F: # %bb.0:
+; X64-AVX512F-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512F-NEXT: vpxor (%rsi), %xmm0, %xmm0
+; X64-AVX512F-NEXT: vptest %xmm0, %xmm0
+; X64-AVX512F-NEXT: setne %al
+; X64-AVX512F-NEXT: retq
+;
+; X64-AVX512Fk-LABEL: length16_eq:
+; X64-AVX512Fk: # %bb.0:
+; X64-AVX512Fk-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512Fk-NEXT: vmovdqu (%rsi), %xmm1
+; X64-AVX512Fk-NEXT: vpcmpneqd %zmm1, %zmm0, %k0
+; X64-AVX512Fk-NEXT: kortestw %k0, %k0
+; X64-AVX512Fk-NEXT: setne %al
+; X64-AVX512Fk-NEXT: vzeroupper
+; X64-AVX512Fk-NEXT: retq
+;
+; X64-AVX512BW-LABEL: length16_eq:
+; X64-AVX512BW: # %bb.0:
+; X64-AVX512BW-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512BW-NEXT: vpxor (%rsi), %xmm0, %xmm0
+; X64-AVX512BW-NEXT: vptest %xmm0, %xmm0
+; X64-AVX512BW-NEXT: setne %al
+; X64-AVX512BW-NEXT: retq
   %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16) nounwind
   %cmp = icmp ne i32 %call, 0
   ret i1 %cmp
@@ -1063,6 +1090,32 @@
 ; X64-AVX-NEXT: vptest %xmm0, %xmm0
 ; X64-AVX-NEXT: sete %al
 ; X64-AVX-NEXT: retq
+;
+; X64-AVX512F-LABEL: length16_eq_const:
+; X64-AVX512F: # %bb.0:
+; X64-AVX512F-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512F-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512F-NEXT: vptest %xmm0, %xmm0
+; X64-AVX512F-NEXT: sete %al
+; X64-AVX512F-NEXT: retq
+;
+; X64-AVX512Fk-LABEL: length16_eq_const:
+; X64-AVX512Fk: # %bb.0:
+; X64-AVX512Fk-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512Fk-NEXT: vmovdqa {{.*#+}} xmm1 = [858927408,926299444,825243960,892613426]
+; X64-AVX512Fk-NEXT: vpcmpneqd %zmm1, %zmm0, %k0
+; X64-AVX512Fk-NEXT: kortestw %k0, %k0
+; X64-AVX512Fk-NEXT: sete %al
+; X64-AVX512Fk-NEXT: vzeroupper
+; X64-AVX512Fk-NEXT: retq
+;
+; X64-AVX512BW-LABEL: length16_eq_const:
+; X64-AVX512BW: # %bb.0:
+; X64-AVX512BW-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512BW-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512BW-NEXT: vptest %xmm0, %xmm0
+; X64-AVX512BW-NEXT: sete %al
+; X64-AVX512BW-NEXT: retq
   %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 16) nounwind
   %c = icmp eq i32 %m, 0
   ret i1 %c
@@ -1147,14 +1200,51 @@
 ; X64-AVX-LABEL: length24_eq:
 ; X64-AVX: # %bb.0:
 ; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
-; X64-AVX-NEXT: vmovq 16(%rdi), %xmm1
-; X64-AVX-NEXT: vmovq 16(%rsi), %xmm2
+; X64-AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
 ; X64-AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
 ; X64-AVX-NEXT: vpxor (%rsi), %xmm0, %xmm0
 ; X64-AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT: vptest %xmm0, %xmm0
 ; X64-AVX-NEXT: sete %al
 ; X64-AVX-NEXT: retq
+;
+; X64-AVX512F-LABEL: length24_eq:
+; X64-AVX512F: # %bb.0:
+; X64-AVX512F-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX512F-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
+; X64-AVX512F-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; X64-AVX512F-NEXT: vpxor (%rsi), %xmm0, %xmm0
+; X64-AVX512F-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-AVX512F-NEXT: vptest %xmm0, %xmm0
+; X64-AVX512F-NEXT: sete %al
+; X64-AVX512F-NEXT: retq
+;
+; X64-AVX512Fk-LABEL: length24_eq:
+; X64-AVX512Fk: # %bb.0:
+; X64-AVX512Fk-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512Fk-NEXT: vmovdqu (%rsi), %xmm1
+; X64-AVX512Fk-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
+; X64-AVX512Fk-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero
+; X64-AVX512Fk-NEXT: vpcmpneqd %zmm3, %zmm2, %k0
+; X64-AVX512Fk-NEXT: vpcmpneqd %zmm1, %zmm0, %k1
+; X64-AVX512Fk-NEXT: kortestw %k0, %k1
+; X64-AVX512Fk-NEXT: sete %al
+; X64-AVX512Fk-NEXT: vzeroupper
+; X64-AVX512Fk-NEXT: retq
+;
+; X64-AVX512BW-LABEL: length24_eq:
+; X64-AVX512BW: # %bb.0:
+; X64-AVX512BW-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512BW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX512BW-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
+; X64-AVX512BW-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; X64-AVX512BW-NEXT: vpxor (%rsi), %xmm0, %xmm0
+; X64-AVX512BW-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-AVX512BW-NEXT: vptest %xmm0, %xmm0
+; X64-AVX512BW-NEXT: sete %al
+; X64-AVX512BW-NEXT: retq
   %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 24) nounwind
   %cmp = icmp eq i32 %call, 0
   ret i1 %cmp
@@ -1213,13 +1303,48 @@
 ; X64-AVX-LABEL: length24_eq_const:
 ; X64-AVX: # %bb.0:
 ; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
-; X64-AVX-NEXT: vmovq 16(%rdi), %xmm1
+; X64-AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
 ; X64-AVX-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm1
 ; X64-AVX-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT: vptest %xmm0, %xmm0
 ; X64-AVX-NEXT: setne %al
 ; X64-AVX-NEXT: retq
+;
+; X64-AVX512F-LABEL: length24_eq_const:
+; X64-AVX512F: # %bb.0:
+; X64-AVX512F-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX512F-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX512F-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512F-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-AVX512F-NEXT: vptest %xmm0, %xmm0
+; X64-AVX512F-NEXT: setne %al
+; X64-AVX512F-NEXT: retq
+;
+; X64-AVX512Fk-LABEL: length24_eq_const:
+; X64-AVX512Fk: # %bb.0:
+; X64-AVX512Fk-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512Fk-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX512Fk-NEXT: vmovdqa {{.*#+}} xmm2 = [959985462,858927408,0,0]
+; X64-AVX512Fk-NEXT: vpcmpneqd %zmm2, %zmm1, %k0
+; X64-AVX512Fk-NEXT: vmovdqa {{.*#+}} xmm1 = [858927408,926299444,825243960,892613426]
+; X64-AVX512Fk-NEXT: vpcmpneqd %zmm1, %zmm0, %k1
+; X64-AVX512Fk-NEXT: kortestw %k0, %k1
+; X64-AVX512Fk-NEXT: setne %al
+; X64-AVX512Fk-NEXT: vzeroupper
+; X64-AVX512Fk-NEXT: retq
+;
+; X64-AVX512BW-LABEL: length24_eq_const:
+; X64-AVX512BW: # %bb.0:
+; X64-AVX512BW-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512BW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX512BW-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX512BW-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512BW-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-AVX512BW-NEXT: vptest %xmm0, %xmm0
+; X64-AVX512BW-NEXT: setne %al
+; X64-AVX512BW-NEXT: retq
   %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 24) nounwind
   %c = icmp ne i32 %m, 0
   ret i1 %c
@@ -1315,11 +1440,39 @@
 ; X64-AVX2-LABEL: length32_eq:
 ; X64-AVX2: # %bb.0:
 ; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
-; X64-AVX2-NEXT: vpxor (%rsi), %ymm0, %ymm0
-; X64-AVX2-NEXT: vptest %ymm0, %ymm0
+; X64-AVX2-NEXT: vpxor (%rsi), %ymm0, %ymm0
+; X64-AVX2-NEXT: vptest %ymm0, %ymm0
 ; X64-AVX2-NEXT: sete %al
 ; X64-AVX2-NEXT: vzeroupper
 ; X64-AVX2-NEXT: retq
+;
+; X64-AVX512F-LABEL: length32_eq:
+; X64-AVX512F: # %bb.0:
+; X64-AVX512F-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX512F-NEXT: vpxor (%rsi), %ymm0, %ymm0
+; X64-AVX512F-NEXT: vptest %ymm0, %ymm0
+; X64-AVX512F-NEXT: sete %al
+; X64-AVX512F-NEXT: vzeroupper
+; X64-AVX512F-NEXT: retq
+;
+; X64-AVX512Fk-LABEL: length32_eq:
+; X64-AVX512Fk: # %bb.0:
+; X64-AVX512Fk-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX512Fk-NEXT: vmovdqu (%rsi), %ymm1
+; X64-AVX512Fk-NEXT: vpcmpneqd %zmm1, %zmm0, %k0
+; X64-AVX512Fk-NEXT: kortestw %k0, %k0
+; X64-AVX512Fk-NEXT: sete %al
+; X64-AVX512Fk-NEXT: vzeroupper
+; X64-AVX512Fk-NEXT: retq
+;
+; X64-AVX512BW-LABEL: length32_eq:
+; X64-AVX512BW: # %bb.0:
+; X64-AVX512BW-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX512BW-NEXT: vpxor (%rsi), %ymm0, %ymm0
+; X64-AVX512BW-NEXT: vptest %ymm0, %ymm0
+; X64-AVX512BW-NEXT: sete %al
+; X64-AVX512BW-NEXT: vzeroupper
+; X64-AVX512BW-NEXT: retq
   %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind
   %cmp = icmp eq i32 %call, 0
   ret i1 %cmp
@@ -1390,6 +1543,41 @@
 ; X64-AVX-NEXT: vptest %xmm0, %xmm0
 ; X64-AVX-NEXT: sete %al
 ; X64-AVX-NEXT: retq
+;
+; X64-AVX512F-LABEL: length32_eq_prefer128:
+; X64-AVX512F: # %bb.0:
+; X64-AVX512F-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512F-NEXT: vmovdqu 16(%rdi), %xmm1
+; X64-AVX512F-NEXT: vpxor 16(%rsi), %xmm1, %xmm1
+; X64-AVX512F-NEXT: vpxor (%rsi), %xmm0, %xmm0
+; X64-AVX512F-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-AVX512F-NEXT: vptest %xmm0, %xmm0
+; X64-AVX512F-NEXT: sete %al
+; X64-AVX512F-NEXT: retq
+;
+; X64-AVX512Fk-LABEL: length32_eq_prefer128:
+; X64-AVX512Fk: # %bb.0:
+; X64-AVX512Fk-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512Fk-NEXT: vmovdqu 16(%rdi), %xmm1
+; X64-AVX512Fk-NEXT: vmovdqu (%rsi), %xmm2
+; X64-AVX512Fk-NEXT: vmovdqu 16(%rsi), %xmm3
+; X64-AVX512Fk-NEXT: vpcmpneqd %zmm3, %zmm1, %k0
+; X64-AVX512Fk-NEXT: vpcmpneqd %zmm2, %zmm0, %k1
+; X64-AVX512Fk-NEXT: kortestw %k0, %k1
+; X64-AVX512Fk-NEXT: sete %al
+; X64-AVX512Fk-NEXT: vzeroupper
+; X64-AVX512Fk-NEXT: retq
+;
+; X64-AVX512BW-LABEL: length32_eq_prefer128:
+; X64-AVX512BW: # %bb.0:
+; X64-AVX512BW-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512BW-NEXT: vmovdqu 16(%rdi), %xmm1
+; X64-AVX512BW-NEXT: vpxor 16(%rsi), %xmm1, %xmm1
+; X64-AVX512BW-NEXT: vpxor (%rsi), %xmm0, %xmm0
+; X64-AVX512BW-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-AVX512BW-NEXT: vptest %xmm0, %xmm0
+; X64-AVX512BW-NEXT: sete %al
+; X64-AVX512BW-NEXT: retq
   %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind
   %cmp = icmp eq i32 %call, 0
   ret i1 %cmp
@@ -1464,6 +1652,34 @@
 ; X64-AVX2-NEXT: setne %al
 ; X64-AVX2-NEXT: vzeroupper
 ; X64-AVX2-NEXT: retq
+;
+; X64-AVX512F-LABEL: length32_eq_const:
+; X64-AVX512F: # %bb.0:
+; X64-AVX512F-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX512F-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX512F-NEXT: vptest %ymm0, %ymm0
+; X64-AVX512F-NEXT: setne %al
+; X64-AVX512F-NEXT: vzeroupper
+; X64-AVX512F-NEXT: retq
+;
+; X64-AVX512Fk-LABEL: length32_eq_const:
+; X64-AVX512Fk: # %bb.0:
+; X64-AVX512Fk-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX512Fk-NEXT: vmovdqa {{.*#+}} ymm1 = [858927408,926299444,825243960,892613426,959985462,858927408,926299444,825243960]
+; X64-AVX512Fk-NEXT: vpcmpneqd %zmm1, %zmm0, %k0
+; X64-AVX512Fk-NEXT: kortestw %k0, %k0
+; X64-AVX512Fk-NEXT: setne %al
+; X64-AVX512Fk-NEXT: vzeroupper
+; X64-AVX512Fk-NEXT: retq
+;
+; X64-AVX512BW-LABEL: length32_eq_const:
+; X64-AVX512BW: # %bb.0:
+; X64-AVX512BW-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX512BW-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX512BW-NEXT: vptest %ymm0, %ymm0
+; X64-AVX512BW-NEXT: setne %al
+; X64-AVX512BW-NEXT: vzeroupper
+; X64-AVX512BW-NEXT: retq
   %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 32) nounwind
   %c = icmp ne i32 %m, 0
   ret i1 %c
@@ -1536,18 +1752,27 @@
 ; X64-AVX512F-LABEL: length64_eq:
 ; X64-AVX512F: # %bb.0:
 ; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0
-; X64-AVX512F-NEXT: vpcmpeqd (%rsi), %zmm0, %k0
+; X64-AVX512F-NEXT: vpcmpneqd (%rsi), %zmm0, %k0
 ; X64-AVX512F-NEXT: kortestw %k0, %k0
-; X64-AVX512F-NEXT: setae %al
+; X64-AVX512F-NEXT: setne %al
 ; X64-AVX512F-NEXT: vzeroupper
 ; X64-AVX512F-NEXT: retq
 ;
+; X64-AVX512Fk-LABEL: length64_eq:
+; X64-AVX512Fk: # %bb.0:
+; X64-AVX512Fk-NEXT: vmovdqu64 (%rdi), %zmm0
+; X64-AVX512Fk-NEXT: vpcmpneqd (%rsi), %zmm0, %k0
+; X64-AVX512Fk-NEXT: kortestw %k0, %k0
+; X64-AVX512Fk-NEXT: setne %al
+; X64-AVX512Fk-NEXT: vzeroupper
+; X64-AVX512Fk-NEXT: retq
+;
 ; X64-AVX512BW-LABEL: length64_eq:
 ; X64-AVX512BW: # %bb.0:
 ; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0
-; X64-AVX512BW-NEXT: vpcmpeqb (%rsi), %zmm0, %k0
+; X64-AVX512BW-NEXT: vpcmpneqb (%rsi), %zmm0, %k0
 ; X64-AVX512BW-NEXT: kortestq %k0, %k0
-; X64-AVX512BW-NEXT: setae %al
+; X64-AVX512BW-NEXT: setne %al
 ; X64-AVX512BW-NEXT: vzeroupper
 ; X64-AVX512BW-NEXT: retq
   %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 64) nounwind
@@ -1605,18 +1830,27 @@
 ; X64-AVX512F-LABEL: length64_eq_const:
 ; X64-AVX512F: # %bb.0:
 ; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0
-; X64-AVX512F-NEXT: vpcmpeqd {{.*}}(%rip), %zmm0, %k0
+; X64-AVX512F-NEXT: vpcmpneqd {{.*}}(%rip), %zmm0, %k0
 ; X64-AVX512F-NEXT: kortestw %k0, %k0
-; X64-AVX512F-NEXT: setb %al
+; X64-AVX512F-NEXT: sete %al
 ; X64-AVX512F-NEXT: vzeroupper
 ; X64-AVX512F-NEXT: retq
 ;
+; X64-AVX512Fk-LABEL: length64_eq_const:
+; X64-AVX512Fk: # %bb.0:
+; X64-AVX512Fk-NEXT: vmovdqu64 (%rdi), %zmm0
+; X64-AVX512Fk-NEXT: vpcmpneqd {{.*}}(%rip), %zmm0, %k0
+; X64-AVX512Fk-NEXT: kortestw %k0, %k0
+; X64-AVX512Fk-NEXT: sete %al
+; X64-AVX512Fk-NEXT: vzeroupper
+; X64-AVX512Fk-NEXT: retq
+;
 ; X64-AVX512BW-LABEL: length64_eq_const:
 ; X64-AVX512BW: # %bb.0:
 ; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0
-; X64-AVX512BW-NEXT: vpcmpeqb {{.*}}(%rip), %zmm0, %k0
+; X64-AVX512BW-NEXT: vpcmpneqb {{.*}}(%rip), %zmm0, %k0
 ; X64-AVX512BW-NEXT: kortestq %k0, %k0
-; X64-AVX512BW-NEXT: setb %al
+; X64-AVX512BW-NEXT: sete %al
 ; X64-AVX512BW-NEXT: vzeroupper
 ; X64-AVX512BW-NEXT: retq
   %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 64) nounwind
diff --git a/llvm/test/CodeGen/X86/setcc-wide-types.ll b/llvm/test/CodeGen/X86/setcc-wide-types.ll
--- a/llvm/test/CodeGen/X86/setcc-wide-types.ll
+++ b/llvm/test/CodeGen/X86/setcc-wide-types.ll
@@ -401,19 +401,19 @@
 ;
 ; AVX512F-LABEL: ne_i512:
 ; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; AVX512F-NEXT: vpcmpneqd %zmm1, %zmm0, %k0
 ; AVX512F-NEXT: xorl %eax, %eax
 ; AVX512F-NEXT: kortestw %k0, %k0
-; AVX512F-NEXT: setae %al
+; AVX512F-NEXT: setne %al
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
 ; AVX512BW-LABEL: ne_i512:
 ; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpcmpeqb %zmm1, %zmm0, %k0
+; AVX512BW-NEXT: vpcmpneqb %zmm1, %zmm0, %k0
 ; AVX512BW-NEXT: xorl %eax, %eax
 ; AVX512BW-NEXT: kortestq %k0, %k0
-; AVX512BW-NEXT: setae %al
+; AVX512BW-NEXT: setne %al
 ; AVX512BW-NEXT: vzeroupper
 ; AVX512BW-NEXT: retq
   %bcx = bitcast <8 x i64> %x to i512
@@ -592,19 +592,19 @@
 ;
 ; AVX512F-LABEL: eq_i512:
 ; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
+; AVX512F-NEXT: vpcmpneqd %zmm1, %zmm0, %k0
 ; AVX512F-NEXT: xorl %eax, %eax
 ; AVX512F-NEXT: kortestw %k0, %k0
-; AVX512F-NEXT: setb %al
+; AVX512F-NEXT: sete %al
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
 ; AVX512BW-LABEL: eq_i512:
 ; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpcmpeqb %zmm1, %zmm0, %k0
+; AVX512BW-NEXT: vpcmpneqb %zmm1, %zmm0, %k0
 ; AVX512BW-NEXT: xorl %eax, %eax
 ; AVX512BW-NEXT: kortestq %k0, %k0
-; AVX512BW-NEXT: setb %al
+; AVX512BW-NEXT: sete %al
 ; AVX512BW-NEXT: vzeroupper
 ; AVX512BW-NEXT: retq
   %bcx = bitcast <8 x i64> %x to i512
@@ -1019,11 +1019,11 @@
 ; AVX512F: # %bb.0:
 ; AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0
 ; AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1
-; AVX512F-NEXT: vpcmpeqd (%rsi), %zmm0, %k1
-; AVX512F-NEXT: vpcmpeqd 64(%rsi), %zmm1, %k0 {%k1}
+; AVX512F-NEXT: vpcmpneqd 64(%rsi), %zmm1, %k0
+; AVX512F-NEXT: vpcmpneqd (%rsi), %zmm0, %k1
 ; AVX512F-NEXT: xorl %eax, %eax
-; AVX512F-NEXT: kortestw %k0, %k0
-; AVX512F-NEXT: setae %al
+; AVX512F-NEXT: kortestw %k0, %k1
+; AVX512F-NEXT: setne %al
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
@@ -1031,11 +1031,11 @@
 ; AVX512BW: # %bb.0:
 ; AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0
 ; AVX512BW-NEXT: vmovdqu64 64(%rdi), %zmm1
-; AVX512BW-NEXT: vpcmpeqb (%rsi), %zmm0, %k1
-; AVX512BW-NEXT: vpcmpeqb 64(%rsi), %zmm1, %k0 {%k1}
+; AVX512BW-NEXT: vpcmpneqb 64(%rsi), %zmm1, %k0
+; AVX512BW-NEXT: vpcmpneqb (%rsi), %zmm0, %k1
 ; AVX512BW-NEXT: xorl %eax, %eax
-; AVX512BW-NEXT: kortestq %k0, %k0
-; AVX512BW-NEXT: setae %al
+; AVX512BW-NEXT: kortestq %k0, %k1
+; AVX512BW-NEXT: setne %al
 ; AVX512BW-NEXT: vzeroupper
 ; AVX512BW-NEXT: retq
   %a0 = load i512, i512* %a
@@ -1113,11 +1113,11 @@
 ; AVX512F: # %bb.0:
 ; AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0
 ; AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1
-; AVX512F-NEXT: vpcmpeqd (%rsi), %zmm0, %k1
-; AVX512F-NEXT: vpcmpeqd 64(%rsi), %zmm1, %k0 {%k1}
+; AVX512F-NEXT: vpcmpneqd 64(%rsi), %zmm1, %k0
+; AVX512F-NEXT: vpcmpneqd (%rsi), %zmm0, %k1
 ; AVX512F-NEXT: xorl %eax, %eax
-; AVX512F-NEXT: kortestw %k0, %k0
-; AVX512F-NEXT: setb %al
+; AVX512F-NEXT: kortestw %k0, %k1
+; AVX512F-NEXT: sete %al
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
@@ -1125,11 +1125,11 @@
 ; AVX512BW: # %bb.0:
 ; AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0
 ; AVX512BW-NEXT: vmovdqu64 64(%rdi), %zmm1
-; AVX512BW-NEXT: vpcmpeqb (%rsi), %zmm0, %k1
-; AVX512BW-NEXT: vpcmpeqb 64(%rsi), %zmm1, %k0 {%k1}
+; AVX512BW-NEXT: vpcmpneqb 64(%rsi), %zmm1, %k0
+; AVX512BW-NEXT: vpcmpneqb (%rsi), %zmm0, %k1
 ; AVX512BW-NEXT: xorl %eax, %eax
-; AVX512BW-NEXT: kortestq %k0, %k0
-; AVX512BW-NEXT: setb %al
+; AVX512BW-NEXT: kortestq %k0, %k1
+; AVX512BW-NEXT: sete %al
 ; AVX512BW-NEXT: vzeroupper
 ; AVX512BW-NEXT: retq
   %a0 = load i512, i512* %a
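
Note: as a minimal standalone reproducer for the new lowering, the IR below is a sketch adapted from the length16_eq test above (the function name @eq16 is illustrative, not from the patch):

  declare i32 @memcmp(i8*, i8*, i64)

  ; Equality-only use of memcmp(x, y, 16): SelectionDAG expands this to a
  ; single 128-bit vector compare of the two loaded operands.
  define i1 @eq16(i8* %x, i8* %y) {
    %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16)
    %cmp = icmp eq i32 %call, 0
    ret i1 %cmp
  }

Run through `llc -mtriple=x86_64-unknown-unknown -mattr=avx512f,prefer-mask-registers`, this should produce the vpcmpneqd/kortestw sequence checked by the X64-AVX512Fk prefix; without the feature it keeps the vpxor/vptest sequence instead.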