diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1437,7 +1437,9 @@
 /// to replace a call to memcmp. The value is set by the target at the
 /// performance threshold for such a replacement. If OptSize is true,
 /// return the limit for functions that have OptSize attribute.
- unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
+ unsigned getMaxExpandSizeMemcmp(bool OptSize, bool Equality) const {
+   if (Equality)
+     return OptSize ? MaxLoadsPerMemcmpEqOptSize : MaxLoadsPerMemcmpEq;
    return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
  }
@@ -2885,6 +2887,10 @@
  unsigned MaxLoadsPerMemcmp;
  /// Likewise for functions with the OptSize attribute.
  unsigned MaxLoadsPerMemcmpOptSize;
+ /// The maximum when only equality matters (memcmp() == or != 0).
+ unsigned MaxLoadsPerMemcmpEq;
+ /// Likewise for functions with the OptSize attribute.
+ unsigned MaxLoadsPerMemcmpEqOptSize;

  /// \brief Specify maximum number of store instructions per memmove call.
  ///
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -568,10 +568,11 @@
  // Perform these initializations only once.
  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
-     MaxLoadsPerMemcmp = 8;
+     MaxLoadsPerMemcmp = MaxLoadsPerMemcmpEq = 8;
  MaxGluedStoresPerMemcpy = 0;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
-     MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
+     MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpEqOptSize =
+         MaxLoadsPerMemcmpOptSize = 4;
  UseUnderscoreSetJmp = false;
  UseUnderscoreLongJmp = false;
  HasMultipleConditionRegisters = false;
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -633,6 +633,8 @@
  MaxLoadsPerMemcmpOptSize = 4;
  MaxLoadsPerMemcmp = Subtarget->requiresStrictAlign()
                          ? MaxLoadsPerMemcmpOptSize : 8;
+ MaxLoadsPerMemcmpEqOptSize = MaxLoadsPerMemcmpOptSize;
+ MaxLoadsPerMemcmpEq = MaxLoadsPerMemcmp;

  setStackPointerRegisterToSaveRestore(AArch64::SP);
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -622,7 +622,7 @@
 AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.AllowOverlappingLoads = !ST->requiresStrictAlign();
- Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
+ Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize, IsZeroCmp);
  Options.NumLoadsPerBlock = Options.MaxNumLoads;
  // TODO: Though vector loads usually perform well on AArch64, in some targets
  // they may wake up the FP unit, which raises the power consumption.
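For context, the separate Equality budget pays off because an equality-only expansion is a branchless xor/or reduction rather than a chain of byte-swapped three-way compares, so more loads stay profitable. A minimal standalone C++ sketch of the code shape the expansion produces for memcmp(a, b, 16) == 0 (illustrative only; eq16 is an invented name, not part of the patch):

#include <cstdint>
#include <cstring>

// Shape of an expanded `memcmp(a, b, 16) == 0` under the equality-only
// budget: four 4-byte loads per side, xor'ed pairwise and or-reduced, with
// a single test at the end. The real expansion emits this fully unrolled;
// a loop is used here only for brevity.
static bool eq16(const unsigned char *a, const unsigned char *b) {
  std::uint32_t acc = 0;
  for (int off = 0; off < 16; off += 4) {
    std::uint32_t wa, wb;
    std::memcpy(&wa, a + off, sizeof(wa)); // unaligned-safe load
    std::memcpy(&wb, b + off, sizeof(wb));
    acc |= wa ^ wb; // no early exit; only equality matters
  }
  return acc == 0;
}

This is exactly the (or (xor A, B), (xor C, D)) pattern that the X86 combine below turns into vector compares plus PTEST or MOVMSK.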
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1228,9 +1228,12 @@
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
+   MaxLoadsPerMemcmpEq = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
+   MaxLoadsPerMemcmpEq = 8;
    MaxLoadsPerMemcmpOptSize = 4;
+   MaxLoadsPerMemcmpEqOptSize = 4;
  }
 }
diff --git a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
--- a/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCTargetTransformInfo.cpp
@@ -586,7 +586,7 @@
 PPCTTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.LoadSizes = {8, 4, 2, 1};
- Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
+ Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize, IsZeroCmp);
  return Options;
 }
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1890,6 +1890,8 @@
  // load/store types (PR33329, PR33914).
  MaxLoadsPerMemcmp = 2;
  MaxLoadsPerMemcmpOptSize = 2;
+ MaxLoadsPerMemcmpEq = 4;
+ MaxLoadsPerMemcmpEqOptSize = 2;

  // Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
  setPrefLoopAlignment(Align(1ULL << ExperimentalPrefLoopAlignment));
@@ -42510,6 +42512,37 @@
  return SDValue();
 }

+/// Recursive helper for combineVectorSizedSetCCEquality() to see if we have a
+/// recognizable memcmp expansion.
+static bool isOrXorXorTree(SDValue X, bool Root = true) {
+  if (X.getOpcode() == ISD::OR)
+    return isOrXorXorTree(X.getOperand(0), false) &&
+           isOrXorXorTree(X.getOperand(1), false);
+  if (Root)
+    return false;
+  return X.getOpcode() == ISD::XOR;
+}
+
+/// Recursive helper for combineVectorSizedSetCCEquality() to emit the memcmp
+/// expansion.
+static SDValue emitOrXorXorTree(SDValue X, SDLoc &DL, SelectionDAG &DAG,
+                                EVT VecVT, EVT CmpVT, bool HasPT) {
+  if (X.getOpcode() == ISD::OR) {
+    SDValue A = emitOrXorXorTree(X.getOperand(0), DL, DAG, VecVT, CmpVT, HasPT);
+    SDValue B = emitOrXorXorTree(X.getOperand(1), DL, DAG, VecVT, CmpVT, HasPT);
+    if (VecVT == CmpVT && HasPT)
+      return DAG.getNode(ISD::OR, DL, VecVT, A, B);
+    return DAG.getNode(ISD::AND, DL, CmpVT, A, B);
+  } else if (X.getOpcode() == ISD::XOR) {
+    SDValue A = DAG.getBitcast(VecVT, X.getOperand(0));
+    SDValue B = DAG.getBitcast(VecVT, X.getOperand(1));
+    if (VecVT == CmpVT && HasPT)
+      return DAG.getNode(ISD::XOR, DL, VecVT, A, B);
+    return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
+  }
+  llvm_unreachable("Impossible");
+}
+
 /// Try to map a 128-bit or larger integer comparison to vector instructions
 /// before type legalization splits it up into chunks.
 static SDValue combineVectorSizedSetCCEquality(SDNode *SetCC, SelectionDAG &DAG,
@@ -42530,10 +42563,8 @@
  // logically-combined vector-sized operands compared to zero. This pattern may
  // be generated by the memcmp expansion pass with oversized integer compares
  // (see PR33325).
- bool IsOrXorXorCCZero = isNullConstant(Y) && X.getOpcode() == ISD::OR &&
-                         X.getOperand(0).getOpcode() == ISD::XOR &&
-                         X.getOperand(1).getOpcode() == ISD::XOR;
- if (isNullConstant(Y) && !IsOrXorXorCCZero)
+ bool IsOrXorXorTreeCCZero = isNullConstant(Y) && isOrXorXorTree(X);
+ if (isNullConstant(Y) && !IsOrXorXorTreeCCZero)
    return SDValue();

  // Don't perform this combine if constructing the vector will be expensive.
@@ -42543,7 +42574,7 @@
           X.getOpcode() == ISD::LOAD;
  };
  if ((!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y)) &&
-     !IsOrXorXorCCZero)
+     !IsOrXorXorTreeCCZero)
    return SDValue();

  EVT VT = SetCC->getValueType(0);
@@ -42552,6 +42583,7 @@

  // Use XOR (plus OR) and PTEST after SSE4.1 and before AVX512.
  // Otherwise use PCMPEQ (plus AND) and mask testing.
+ bool DoZext = false;
  if ((OpSize == 128 && Subtarget.hasSSE2()) ||
      (OpSize == 256 && HasAVX) ||
      (OpSize == 512 && Subtarget.useAVX512Regs())) {
@@ -42571,24 +42603,12 @@
  }

  SDValue Cmp;
- if (IsOrXorXorCCZero) {
+ if (IsOrXorXorTreeCCZero) {
    // This is a bitwise-combined equality comparison of 2 pairs of vectors:
    // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
    // Use 2 vector equality compares and 'and' the results before doing a
    // MOVMSK.
-   SDValue A = DAG.getBitcast(VecVT, X.getOperand(0).getOperand(0));
-   SDValue B = DAG.getBitcast(VecVT, X.getOperand(0).getOperand(1));
-   SDValue C = DAG.getBitcast(VecVT, X.getOperand(1).getOperand(0));
-   SDValue D = DAG.getBitcast(VecVT, X.getOperand(1).getOperand(1));
-   if (VecVT == CmpVT && HasPT) {
-     SDValue Cmp1 = DAG.getNode(ISD::XOR, DL, VecVT, A, B);
-     SDValue Cmp2 = DAG.getNode(ISD::XOR, DL, VecVT, C, D);
-     Cmp = DAG.getNode(ISD::OR, DL, VecVT, Cmp1, Cmp2);
-   } else {
-     SDValue Cmp1 = DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
-     SDValue Cmp2 = DAG.getSetCC(DL, CmpVT, C, D, ISD::SETEQ);
-     Cmp = DAG.getNode(ISD::AND, DL, CmpVT, Cmp1, Cmp2);
-   }
+   Cmp = emitOrXorXorTree(X, DL, DAG, VecVT, CmpVT, HasPT);
  } else {
    SDValue VecX = DAG.getBitcast(VecVT, X);
    SDValue VecY = DAG.getBitcast(VecVT, Y);
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -3436,8 +3436,8 @@
 X86TTIImpl::TTI::MemCmpExpansionOptions
 X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
- Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
- Options.NumLoadsPerBlock = 2;
+ Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize, IsZeroCmp);
+ Options.NumLoadsPerBlock = 4;
  if (IsZeroCmp) {
    // Only enable vector loads for equality comparison. Right now the vector
    // version is not as fast for three way compare (see #33329).
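The recursion in isOrXorXorTree() is easier to follow outside of SelectionDAG. A standalone model over a toy expression type (hypothetical Node/Op names; the real code walks SDValue operands):

#include <cassert>

enum class Op { Or, Xor, Leaf };
struct Node {
  Op Kind;
  const Node *L = nullptr;
  const Node *R = nullptr;
};

// Mirrors isOrXorXorTree(): every interior node of the tree must be an OR,
// every leaf of the OR tree must be a XOR, and a bare XOR at the root is
// rejected (that case is the plain `X == Y` compare handled elsewhere).
static bool isOrXorXorTree(const Node *X, bool Root = true) {
  if (X->Kind == Op::Or)
    return isOrXorXorTree(X->L, false) && isOrXorXorTree(X->R, false);
  if (Root)
    return false;
  return X->Kind == Op::Xor;
}

int main() {
  Node A{Op::Leaf}, B{Op::Leaf}, C{Op::Leaf}, D{Op::Leaf};
  Node XorAB{Op::Xor, &A, &B}, XorCD{Op::Xor, &C, &D};
  Node OrTree{Op::Or, &XorAB, &XorCD};
  assert(isOrXorXorTree(&OrTree)); // the old hard-coded 2-pair pattern
  Node Wider{Op::Or, &OrTree, &XorAB};
  assert(isOrXorXorTree(&Wider));  // deeper or-reduction trees now match too
  assert(!isOrXorXorTree(&XorAB)); // root may not be a bare XOR
}

emitOrXorXorTree() then repeats the same walk to rebuild the tree out of vector XOR/OR nodes (for PTEST) or SETEQ/AND nodes (for MOVMSK), so memcmp expansions wider than two load pairs no longer fall back to the scalar pattern's library call.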
diff --git a/llvm/test/CodeGen/X86/memcmp.ll b/llvm/test/CodeGen/X86/memcmp.ll
--- a/llvm/test/CodeGen/X86/memcmp.ll
+++ b/llvm/test/CodeGen/X86/memcmp.ll
@@ -11,7 +11,7 @@
 ; This tests codegen time inlining/optimization of memcmp
 ; rdar://6480398

-@.str = private constant [65 x i8] c"0123456789012345678901234567890123456789012345678901234567890123\00", align 1
+@.str = private constant [513 x i8] c"01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901\00", align 1

 declare i32 @memcmp(i8*, i8*, i64)

@@ -189,7 +189,7 @@
 ; X64-NEXT: cmpl $12849, %eax # imm = 0x3231
 ; X64-NEXT: setne %al
 ; X64-NEXT: retq
- %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 1), i64 2) nounwind
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 1), i64 2) nounwind
  %c = icmp ne i32 %m, 0
  ret i1 %c
 }

@@ -431,7 +431,7 @@
 ; X64-NEXT: cmpl $875770417, (%rdi) # imm = 0x34333231
 ; X64-NEXT: sete %al
 ; X64-NEXT: retq
- %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 1), i64 4) nounwind
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 1), i64 4) nounwind
  %c = icmp eq i32 %m, 0
  ret i1 %c
 }

@@ -679,7 +679,7 @@
 ; X64-NEXT: cmpq %rax, (%rdi)
 ; X64-NEXT: setne %al
 ; X64-NEXT: retq
- %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 8) nounwind
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 8) nounwind
  %c = icmp ne i32 %m, 0
  ret i1 %c
 }

@@ -687,14 +687,20 @@
 define i1 @length9_eq(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length9_eq:
 ; X86: # %bb.0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $9
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
-; X86-NEXT: testl %eax, %eax
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %edx
+; X86-NEXT: movl 4(%ecx), %esi
+; X86-NEXT: xorl (%eax), %edx
+; X86-NEXT: xorl 4(%eax), %esi
+; X86-NEXT: orl %edx, %esi
+; X86-NEXT: movb 8(%ecx), %cl
+; X86-NEXT: xorb 8(%eax), %cl
+; X86-NEXT: movzbl %cl, %eax
+; X86-NEXT: orl %esi, %eax
 ; X86-NEXT: sete %al
+; X86-NEXT: popl %esi
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: length9_eq:

@@ -715,14 +721,20 @@
 define i1 @length10_eq(i8* %X, i8* %Y) nounwind {
 ; X86-LABEL: length10_eq:
 ; X86: # %bb.0:
-; X86-NEXT: pushl $0
-; X86-NEXT: pushl $10
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
-; X86-NEXT: testl %eax, %eax
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %edx
+; X86-NEXT: movl 4(%ecx), %esi
+; X86-NEXT: xorl (%eax), %edx
+; X86-NEXT: xorl 4(%eax), %esi
+; X86-NEXT: orl %edx, %esi
+; X86-NEXT: movzwl 8(%ecx), %ecx
+; X86-NEXT: xorw 8(%eax), %cx
+; X86-NEXT:
movzwl %cx, %eax +; X86-NEXT: orl %esi, %eax ; X86-NEXT: sete %al +; X86-NEXT: popl %esi ; X86-NEXT: retl ; ; X64-LABEL: length10_eq: @@ -743,14 +755,19 @@ define i1 @length11_eq(i8* %X, i8* %Y) nounwind { ; X86-LABEL: length11_eq: ; X86: # %bb.0: -; X86-NEXT: pushl $0 -; X86-NEXT: pushl $11 -; X86-NEXT: pushl {{[0-9]+}}(%esp) -; X86-NEXT: pushl {{[0-9]+}}(%esp) -; X86-NEXT: calll memcmp -; X86-NEXT: addl $16, %esp -; X86-NEXT: testl %eax, %eax +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl (%ecx), %edx +; X86-NEXT: movl 4(%ecx), %esi +; X86-NEXT: xorl (%eax), %edx +; X86-NEXT: xorl 4(%eax), %esi +; X86-NEXT: orl %edx, %esi +; X86-NEXT: movl 7(%ecx), %ecx +; X86-NEXT: xorl 7(%eax), %ecx +; X86-NEXT: orl %esi, %ecx ; X86-NEXT: sete %al +; X86-NEXT: popl %esi ; X86-NEXT: retl ; ; X64-LABEL: length11_eq: @@ -770,14 +787,19 @@ define i1 @length12_eq(i8* %X, i8* %Y) nounwind { ; X86-LABEL: length12_eq: ; X86: # %bb.0: -; X86-NEXT: pushl $0 -; X86-NEXT: pushl $12 -; X86-NEXT: pushl {{[0-9]+}}(%esp) -; X86-NEXT: pushl {{[0-9]+}}(%esp) -; X86-NEXT: calll memcmp -; X86-NEXT: addl $16, %esp -; X86-NEXT: testl %eax, %eax +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl (%ecx), %edx +; X86-NEXT: movl 4(%ecx), %esi +; X86-NEXT: xorl (%eax), %edx +; X86-NEXT: xorl 4(%eax), %esi +; X86-NEXT: orl %edx, %esi +; X86-NEXT: movl 8(%ecx), %ecx +; X86-NEXT: xorl 8(%eax), %ecx +; X86-NEXT: orl %esi, %ecx ; X86-NEXT: setne %al +; X86-NEXT: popl %esi ; X86-NEXT: retl ; ; X64-LABEL: length12_eq: @@ -835,14 +857,23 @@ define i1 @length13_eq(i8* %X, i8* %Y) nounwind { ; X86-LABEL: length13_eq: ; X86: # %bb.0: -; X86-NEXT: pushl $0 -; X86-NEXT: pushl $13 -; X86-NEXT: pushl {{[0-9]+}}(%esp) -; X86-NEXT: pushl {{[0-9]+}}(%esp) -; X86-NEXT: calll memcmp -; X86-NEXT: addl $16, %esp -; X86-NEXT: testl %eax, %eax +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl (%ecx), %edx +; X86-NEXT: movl 4(%ecx), %esi +; X86-NEXT: xorl (%eax), %edx +; X86-NEXT: xorl 4(%eax), %esi +; X86-NEXT: orl %edx, %esi +; X86-NEXT: movl 8(%ecx), %edx +; X86-NEXT: xorl 8(%eax), %edx +; X86-NEXT: movb 12(%ecx), %cl +; X86-NEXT: xorb 12(%eax), %cl +; X86-NEXT: movzbl %cl, %eax +; X86-NEXT: orl %edx, %eax +; X86-NEXT: orl %esi, %eax ; X86-NEXT: sete %al +; X86-NEXT: popl %esi ; X86-NEXT: retl ; ; X64-LABEL: length13_eq: @@ -862,14 +893,23 @@ define i1 @length14_eq(i8* %X, i8* %Y) nounwind { ; X86-LABEL: length14_eq: ; X86: # %bb.0: -; X86-NEXT: pushl $0 -; X86-NEXT: pushl $14 -; X86-NEXT: pushl {{[0-9]+}}(%esp) -; X86-NEXT: pushl {{[0-9]+}}(%esp) -; X86-NEXT: calll memcmp -; X86-NEXT: addl $16, %esp -; X86-NEXT: testl %eax, %eax +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl (%ecx), %edx +; X86-NEXT: movl 4(%ecx), %esi +; X86-NEXT: xorl (%eax), %edx +; X86-NEXT: xorl 4(%eax), %esi +; X86-NEXT: orl %edx, %esi +; X86-NEXT: movl 8(%ecx), %edx +; X86-NEXT: xorl 8(%eax), %edx +; X86-NEXT: movzwl 12(%ecx), %ecx +; X86-NEXT: xorw 12(%eax), %cx +; X86-NEXT: movzwl %cx, %eax +; X86-NEXT: orl %edx, %eax +; X86-NEXT: orl %esi, %eax ; X86-NEXT: sete %al +; X86-NEXT: popl %esi ; X86-NEXT: retl ; ; X64-LABEL: length14_eq: @@ -889,14 +929,22 @@ define i1 @length15_eq(i8* %X, i8* %Y) nounwind { ; X86-LABEL: length15_eq: ; X86: # %bb.0: -; X86-NEXT: pushl $0 -; X86-NEXT: 
pushl $15
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NEXT: calll memcmp
-; X86-NEXT: addl $16, %esp
-; X86-NEXT: testl %eax, %eax
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %edx
+; X86-NEXT: movl 4(%ecx), %esi
+; X86-NEXT: xorl (%eax), %edx
+; X86-NEXT: xorl 4(%eax), %esi
+; X86-NEXT: orl %edx, %esi
+; X86-NEXT: movl 8(%ecx), %edx
+; X86-NEXT: xorl 8(%eax), %edx
+; X86-NEXT: movl 11(%ecx), %ecx
+; X86-NEXT: xorl 11(%eax), %ecx
+; X86-NEXT: orl %edx, %ecx
+; X86-NEXT: orl %esi, %ecx
 ; X86-NEXT: sete %al
+; X86-NEXT: popl %esi
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: length15_eq:

@@ -956,26 +1004,42 @@
 define i1 @length16_eq(i8* %x, i8* %y) nounwind {
 ; X86-NOSSE-LABEL: length16_eq:
 ; X86-NOSSE: # %bb.0:
-; X86-NOSSE-NEXT: pushl $0
-; X86-NOSSE-NEXT: pushl $16
-; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT: calll memcmp
-; X86-NOSSE-NEXT: addl $16, %esp
-; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: pushl %esi
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl (%ecx), %edx
+; X86-NOSSE-NEXT: movl 4(%ecx), %esi
+; X86-NOSSE-NEXT: xorl (%eax), %edx
+; X86-NOSSE-NEXT: xorl 4(%eax), %esi
+; X86-NOSSE-NEXT: orl %edx, %esi
+; X86-NOSSE-NEXT: movl 8(%ecx), %edx
+; X86-NOSSE-NEXT: xorl 8(%eax), %edx
+; X86-NOSSE-NEXT: movl 12(%ecx), %ecx
+; X86-NOSSE-NEXT: xorl 12(%eax), %ecx
+; X86-NOSSE-NEXT: orl %edx, %ecx
+; X86-NOSSE-NEXT: orl %esi, %ecx
 ; X86-NOSSE-NEXT: setne %al
+; X86-NOSSE-NEXT: popl %esi
 ; X86-NOSSE-NEXT: retl
 ;
 ; X86-SSE1-LABEL: length16_eq:
 ; X86-SSE1: # %bb.0:
-; X86-SSE1-NEXT: pushl $0
-; X86-SSE1-NEXT: pushl $16
-; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-SSE1-NEXT: calll memcmp
-; X86-SSE1-NEXT: addl $16, %esp
-; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: pushl %esi
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT: movl (%ecx), %edx
+; X86-SSE1-NEXT: movl 4(%ecx), %esi
+; X86-SSE1-NEXT: xorl (%eax), %edx
+; X86-SSE1-NEXT: xorl 4(%eax), %esi
+; X86-SSE1-NEXT: orl %edx, %esi
+; X86-SSE1-NEXT: movl 8(%ecx), %edx
+; X86-SSE1-NEXT: xorl 8(%eax), %edx
+; X86-SSE1-NEXT: movl 12(%ecx), %ecx
+; X86-SSE1-NEXT: xorl 12(%eax), %ecx
+; X86-SSE1-NEXT: orl %edx, %ecx
+; X86-SSE1-NEXT: orl %esi, %ecx
 ; X86-SSE1-NEXT: setne %al
+; X86-SSE1-NEXT: popl %esi
 ; X86-SSE1-NEXT: retl
 ;
 ; X86-SSE2-LABEL: length16_eq:

@@ -1007,34 +1071,64 @@
 ; X64-AVX-NEXT: vptest %xmm0, %xmm0
 ; X64-AVX-NEXT: setne %al
 ; X64-AVX-NEXT: retq
- %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16) nounwind
- %cmp = icmp ne i32 %call, 0
- ret i1 %cmp
+;
+; X64-AVX512F-LABEL: length16_eq:
+; X64-AVX512F: # %bb.0:
+; X64-AVX512F-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512F-NEXT: vpxor (%rsi), %xmm0, %xmm0
+; X64-AVX512F-NEXT: vptest %xmm0, %xmm0
+; X64-AVX512F-NEXT: setne %al
+; X64-AVX512F-NEXT: retq
+;
+; X64-AVX512BW-LABEL: length16_eq:
+; X64-AVX512BW: # %bb.0:
+; X64-AVX512BW-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512BW-NEXT: vpxor (%rsi), %xmm0, %xmm0
+; X64-AVX512BW-NEXT: vptest %xmm0, %xmm0
+; X64-AVX512BW-NEXT: setne %al
+; X64-AVX512BW-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16) nounwind
+ %cmp = icmp ne i32 %call, 0
+ ret i1 %cmp
 }

 define i1 @length16_eq_const(i8* %X) nounwind {
 ; X86-NOSSE-LABEL: length16_eq_const:
 ; X86-NOSSE: # %bb.0:
-;
X86-NOSSE-NEXT: pushl $0
-; X86-NOSSE-NEXT: pushl $16
-; X86-NOSSE-NEXT: pushl $.L.str
-; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT: calll memcmp
-; X86-NOSSE-NEXT: addl $16, %esp
-; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: pushl %esi
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl $858927408, %ecx # imm = 0x33323130
+; X86-NOSSE-NEXT: xorl (%eax), %ecx
+; X86-NOSSE-NEXT: movl $926299444, %edx # imm = 0x37363534
+; X86-NOSSE-NEXT: xorl 4(%eax), %edx
+; X86-NOSSE-NEXT: orl %ecx, %edx
+; X86-NOSSE-NEXT: movl $825243960, %ecx # imm = 0x31303938
+; X86-NOSSE-NEXT: xorl 8(%eax), %ecx
+; X86-NOSSE-NEXT: movl $892613426, %esi # imm = 0x35343332
+; X86-NOSSE-NEXT: xorl 12(%eax), %esi
+; X86-NOSSE-NEXT: orl %ecx, %esi
+; X86-NOSSE-NEXT: orl %edx, %esi
 ; X86-NOSSE-NEXT: sete %al
+; X86-NOSSE-NEXT: popl %esi
 ; X86-NOSSE-NEXT: retl
 ;
 ; X86-SSE1-LABEL: length16_eq_const:
 ; X86-SSE1: # %bb.0:
-; X86-SSE1-NEXT: pushl $0
-; X86-SSE1-NEXT: pushl $16
-; X86-SSE1-NEXT: pushl $.L.str
-; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
-; X86-SSE1-NEXT: calll memcmp
-; X86-SSE1-NEXT: addl $16, %esp
-; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: pushl %esi
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: movl $858927408, %ecx # imm = 0x33323130
+; X86-SSE1-NEXT: xorl (%eax), %ecx
+; X86-SSE1-NEXT: movl $926299444, %edx # imm = 0x37363534
+; X86-SSE1-NEXT: xorl 4(%eax), %edx
+; X86-SSE1-NEXT: orl %ecx, %edx
+; X86-SSE1-NEXT: movl $825243960, %ecx # imm = 0x31303938
+; X86-SSE1-NEXT: xorl 8(%eax), %ecx
+; X86-SSE1-NEXT: movl $892613426, %esi # imm = 0x35343332
+; X86-SSE1-NEXT: xorl 12(%eax), %esi
+; X86-SSE1-NEXT: orl %ecx, %esi
+; X86-SSE1-NEXT: orl %edx, %esi
 ; X86-SSE1-NEXT: sete %al
+; X86-SSE1-NEXT: popl %esi
 ; X86-SSE1-NEXT: retl
 ;
 ; X86-SSE2-LABEL: length16_eq_const:

@@ -1063,7 +1157,23 @@
 ; X64-AVX-NEXT: vptest %xmm0, %xmm0
 ; X64-AVX-NEXT: sete %al
 ; X64-AVX-NEXT: retq
- %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 16) nounwind
+;
+; X64-AVX512F-LABEL: length16_eq_const:
+; X64-AVX512F: # %bb.0:
+; X64-AVX512F-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512F-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512F-NEXT: vptest %xmm0, %xmm0
+; X64-AVX512F-NEXT: sete %al
+; X64-AVX512F-NEXT: retq
+;
+; X64-AVX512BW-LABEL: length16_eq_const:
+; X64-AVX512BW: # %bb.0:
+; X64-AVX512BW-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512BW-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX512BW-NEXT: vptest %xmm0, %xmm0
+; X64-AVX512BW-NEXT: sete %al
+; X64-AVX512BW-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 16) nounwind
  %c = icmp eq i32 %m, 0
  ret i1 %c
 }

@@ -1147,14 +1257,38 @@
 ; X64-AVX-LABEL: length24_eq:
 ; X64-AVX: # %bb.0:
 ; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
-; X64-AVX-NEXT: vmovq 16(%rdi), %xmm1
-; X64-AVX-NEXT: vmovq 16(%rsi), %xmm2
+; X64-AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
 ; X64-AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1
 ; X64-AVX-NEXT: vpxor (%rsi), %xmm0, %xmm0
 ; X64-AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT: vptest %xmm0, %xmm0
 ; X64-AVX-NEXT: sete %al
 ; X64-AVX-NEXT: retq
+;
+; X64-AVX512F-LABEL: length24_eq:
+; X64-AVX512F: # %bb.0:
+; X64-AVX512F-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX512F-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
+; X64-AVX512F-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; X64-AVX512F-NEXT:
vpxor (%rsi), %xmm0, %xmm0 +; X64-AVX512F-NEXT: vpor %xmm1, %xmm0, %xmm0 +; X64-AVX512F-NEXT: vptest %xmm0, %xmm0 +; X64-AVX512F-NEXT: sete %al +; X64-AVX512F-NEXT: retq +; +; X64-AVX512BW-LABEL: length24_eq: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu (%rdi), %xmm0 +; X64-AVX512BW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; X64-AVX512BW-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero +; X64-AVX512BW-NEXT: vpxor %xmm2, %xmm1, %xmm1 +; X64-AVX512BW-NEXT: vpxor (%rsi), %xmm0, %xmm0 +; X64-AVX512BW-NEXT: vpor %xmm1, %xmm0, %xmm0 +; X64-AVX512BW-NEXT: vptest %xmm0, %xmm0 +; X64-AVX512BW-NEXT: sete %al +; X64-AVX512BW-NEXT: retq %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 24) nounwind %cmp = icmp eq i32 %call, 0 ret i1 %cmp @@ -1213,14 +1347,36 @@ ; X64-AVX-LABEL: length24_eq_const: ; X64-AVX: # %bb.0: ; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0 -; X64-AVX-NEXT: vmovq 16(%rdi), %xmm1 +; X64-AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; X64-AVX-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm1 ; X64-AVX-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0 ; X64-AVX-NEXT: vpor %xmm1, %xmm0, %xmm0 ; X64-AVX-NEXT: vptest %xmm0, %xmm0 ; X64-AVX-NEXT: setne %al ; X64-AVX-NEXT: retq - %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 24) nounwind +; +; X64-AVX512F-LABEL: length24_eq_const: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu (%rdi), %xmm0 +; X64-AVX512F-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; X64-AVX512F-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm1 +; X64-AVX512F-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0 +; X64-AVX512F-NEXT: vpor %xmm1, %xmm0, %xmm0 +; X64-AVX512F-NEXT: vptest %xmm0, %xmm0 +; X64-AVX512F-NEXT: setne %al +; X64-AVX512F-NEXT: retq +; +; X64-AVX512BW-LABEL: length24_eq_const: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu (%rdi), %xmm0 +; X64-AVX512BW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; X64-AVX512BW-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm1 +; X64-AVX512BW-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0 +; X64-AVX512BW-NEXT: vpor %xmm1, %xmm0, %xmm0 +; X64-AVX512BW-NEXT: vptest %xmm0, %xmm0 +; X64-AVX512BW-NEXT: setne %al +; X64-AVX512BW-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 24) nounwind %c = icmp ne i32 %m, 0 ret i1 %c } @@ -1315,11 +1471,29 @@ ; X64-AVX2-LABEL: length32_eq: ; X64-AVX2: # %bb.0: ; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0 -; X64-AVX2-NEXT: vpxor (%rsi), %ymm0, %ymm0 -; X64-AVX2-NEXT: vptest %ymm0, %ymm0 +; X64-AVX2-NEXT: vpxor (%rsi), %ymm0, %ymm0 +; X64-AVX2-NEXT: vptest %ymm0, %ymm0 ; X64-AVX2-NEXT: sete %al ; X64-AVX2-NEXT: vzeroupper ; X64-AVX2-NEXT: retq +; +; X64-AVX512F-LABEL: length32_eq: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX512F-NEXT: vpxor (%rsi), %ymm0, %ymm0 +; X64-AVX512F-NEXT: vptest %ymm0, %ymm0 +; X64-AVX512F-NEXT: sete %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-AVX512BW-LABEL: length32_eq: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX512BW-NEXT: vpxor (%rsi), %ymm0, %ymm0 +; X64-AVX512BW-NEXT: vptest %ymm0, %ymm0 +; X64-AVX512BW-NEXT: sete %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind %cmp = icmp eq i32 %call, 0 ret i1 %cmp @@ -1390,6 +1564,28 @@ ; X64-AVX-NEXT: vptest %xmm0, %xmm0 ; X64-AVX-NEXT: sete %al ; X64-AVX-NEXT: retq +; +; X64-AVX512F-LABEL: length32_eq_prefer128: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu (%rdi), 
%xmm0 +; X64-AVX512F-NEXT: vmovdqu 16(%rdi), %xmm1 +; X64-AVX512F-NEXT: vpxor 16(%rsi), %xmm1, %xmm1 +; X64-AVX512F-NEXT: vpxor (%rsi), %xmm0, %xmm0 +; X64-AVX512F-NEXT: vpor %xmm1, %xmm0, %xmm0 +; X64-AVX512F-NEXT: vptest %xmm0, %xmm0 +; X64-AVX512F-NEXT: sete %al +; X64-AVX512F-NEXT: retq +; +; X64-AVX512BW-LABEL: length32_eq_prefer128: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu (%rdi), %xmm0 +; X64-AVX512BW-NEXT: vmovdqu 16(%rdi), %xmm1 +; X64-AVX512BW-NEXT: vpxor 16(%rsi), %xmm1, %xmm1 +; X64-AVX512BW-NEXT: vpxor (%rsi), %xmm0, %xmm0 +; X64-AVX512BW-NEXT: vpor %xmm1, %xmm0, %xmm0 +; X64-AVX512BW-NEXT: vptest %xmm0, %xmm0 +; X64-AVX512BW-NEXT: sete %al +; X64-AVX512BW-NEXT: retq %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind %cmp = icmp eq i32 %call, 0 ret i1 %cmp @@ -1464,68 +1660,141 @@ ; X64-AVX2-NEXT: setne %al ; X64-AVX2-NEXT: vzeroupper ; X64-AVX2-NEXT: retq - %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 32) nounwind +; +; X64-AVX512F-LABEL: length32_eq_const: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX512F-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0 +; X64-AVX512F-NEXT: vptest %ymm0, %ymm0 +; X64-AVX512F-NEXT: setne %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-AVX512BW-LABEL: length32_eq_const: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX512BW-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0 +; X64-AVX512BW-NEXT: vptest %ymm0, %ymm0 +; X64-AVX512BW-NEXT: setne %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 32) nounwind %c = icmp ne i32 %m, 0 ret i1 %c } -define i32 @length64(i8* %X, i8* %Y) nounwind { -; X86-LABEL: length64: +define i32 @length63(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length63: ; X86: # %bb.0: ; X86-NEXT: pushl $0 -; X86-NEXT: pushl $64 +; X86-NEXT: pushl $63 ; X86-NEXT: pushl {{[0-9]+}}(%esp) ; X86-NEXT: pushl {{[0-9]+}}(%esp) ; X86-NEXT: calll memcmp ; X86-NEXT: addl $16, %esp ; X86-NEXT: retl ; -; X64-LABEL: length64: +; X64-LABEL: length63: ; X64: # %bb.0: -; X64-NEXT: movl $64, %edx +; X64-NEXT: movl $63, %edx ; X64-NEXT: jmp memcmp # TAILCALL - %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 64) nounwind + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 63) nounwind ret i32 %m } -define i1 @length64_eq(i8* %x, i8* %y) nounwind { -; X86-LABEL: length64_eq: -; X86: # %bb.0: -; X86-NEXT: pushl $0 -; X86-NEXT: pushl $64 -; X86-NEXT: pushl {{[0-9]+}}(%esp) -; X86-NEXT: pushl {{[0-9]+}}(%esp) -; X86-NEXT: calll memcmp -; X86-NEXT: addl $16, %esp -; X86-NEXT: testl %eax, %eax -; X86-NEXT: setne %al -; X86-NEXT: retl +define i1 @length63_eq(i8* %x, i8* %y) nounwind { +; X86-NOSSE-LABEL: length63_eq: +; X86-NOSSE: # %bb.0: +; X86-NOSSE-NEXT: pushl $0 +; X86-NOSSE-NEXT: pushl $63 +; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NOSSE-NEXT: calll memcmp +; X86-NOSSE-NEXT: addl $16, %esp +; X86-NOSSE-NEXT: testl %eax, %eax +; X86-NOSSE-NEXT: setne %al +; X86-NOSSE-NEXT: retl ; -; X64-SSE2-LABEL: length64_eq: +; X86-SSE1-LABEL: length63_eq: +; X86-SSE1: # %bb.0: +; X86-SSE1-NEXT: pushl $0 +; X86-SSE1-NEXT: pushl $63 +; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp) +; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp) +; X86-SSE1-NEXT: calll memcmp +; X86-SSE1-NEXT: addl $16, %esp +; X86-SSE1-NEXT: testl %eax, %eax +; X86-SSE1-NEXT: setne %al +; 
X86-SSE1-NEXT: retl +; +; X86-SSE2-LABEL: length63_eq: +; X86-SSE2: # %bb.0: +; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-SSE2-NEXT: movdqu (%ecx), %xmm0 +; X86-SSE2-NEXT: movdqu 16(%ecx), %xmm1 +; X86-SSE2-NEXT: movdqu (%eax), %xmm2 +; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm2 +; X86-SSE2-NEXT: movdqu 16(%eax), %xmm0 +; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm0 +; X86-SSE2-NEXT: movdqu 32(%ecx), %xmm1 +; X86-SSE2-NEXT: movdqu 32(%eax), %xmm3 +; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm3 +; X86-SSE2-NEXT: movdqu 47(%ecx), %xmm1 +; X86-SSE2-NEXT: movdqu 47(%eax), %xmm4 +; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm4 +; X86-SSE2-NEXT: pand %xmm3, %xmm4 +; X86-SSE2-NEXT: pand %xmm0, %xmm4 +; X86-SSE2-NEXT: pand %xmm2, %xmm4 +; X86-SSE2-NEXT: pmovmskb %xmm4, %eax +; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF +; X86-SSE2-NEXT: setne %al +; X86-SSE2-NEXT: retl +; +; X64-SSE2-LABEL: length63_eq: ; X64-SSE2: # %bb.0: -; X64-SSE2-NEXT: pushq %rax -; X64-SSE2-NEXT: movl $64, %edx -; X64-SSE2-NEXT: callq memcmp -; X64-SSE2-NEXT: testl %eax, %eax +; X64-SSE2-NEXT: movdqu (%rdi), %xmm0 +; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1 +; X64-SSE2-NEXT: movdqu 32(%rdi), %xmm2 +; X64-SSE2-NEXT: movdqu 47(%rdi), %xmm3 +; X64-SSE2-NEXT: movdqu (%rsi), %xmm4 +; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm4 +; X64-SSE2-NEXT: movdqu 16(%rsi), %xmm0 +; X64-SSE2-NEXT: pcmpeqb %xmm1, %xmm0 +; X64-SSE2-NEXT: movdqu 32(%rsi), %xmm1 +; X64-SSE2-NEXT: pcmpeqb %xmm2, %xmm1 +; X64-SSE2-NEXT: movdqu 47(%rsi), %xmm2 +; X64-SSE2-NEXT: pcmpeqb %xmm3, %xmm2 +; X64-SSE2-NEXT: pand %xmm1, %xmm2 +; X64-SSE2-NEXT: pand %xmm0, %xmm2 +; X64-SSE2-NEXT: pand %xmm4, %xmm2 +; X64-SSE2-NEXT: pmovmskb %xmm2, %eax +; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF ; X64-SSE2-NEXT: setne %al -; X64-SSE2-NEXT: popq %rcx ; X64-SSE2-NEXT: retq ; -; X64-AVX1-LABEL: length64_eq: +; X64-AVX1-LABEL: length63_eq: ; X64-AVX1: # %bb.0: -; X64-AVX1-NEXT: pushq %rax -; X64-AVX1-NEXT: movl $64, %edx -; X64-AVX1-NEXT: callq memcmp -; X64-AVX1-NEXT: testl %eax, %eax +; X64-AVX1-NEXT: vmovdqu (%rdi), %xmm0 +; X64-AVX1-NEXT: vmovdqu 16(%rdi), %xmm1 +; X64-AVX1-NEXT: vmovdqu 32(%rdi), %xmm2 +; X64-AVX1-NEXT: vmovdqu 47(%rdi), %xmm3 +; X64-AVX1-NEXT: vpxor 47(%rsi), %xmm3, %xmm3 +; X64-AVX1-NEXT: vpxor 32(%rsi), %xmm2, %xmm2 +; X64-AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2 +; X64-AVX1-NEXT: vpxor 16(%rsi), %xmm1, %xmm1 +; X64-AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1 +; X64-AVX1-NEXT: vpxor (%rsi), %xmm0, %xmm0 +; X64-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 +; X64-AVX1-NEXT: vptest %xmm0, %xmm0 ; X64-AVX1-NEXT: setne %al -; X64-AVX1-NEXT: popq %rcx ; X64-AVX1-NEXT: retq ; -; X64-AVX2-LABEL: length64_eq: +; X64-AVX2-LABEL: length63_eq: ; X64-AVX2: # %bb.0: ; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0 -; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 -; X64-AVX2-NEXT: vpxor 32(%rsi), %ymm1, %ymm1 +; X64-AVX2-NEXT: vmovdqu 31(%rdi), %ymm1 +; X64-AVX2-NEXT: vpxor 31(%rsi), %ymm1, %ymm1 ; X64-AVX2-NEXT: vpxor (%rsi), %ymm0, %ymm0 ; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 ; X64-AVX2-NEXT: vptest %ymm0, %ymm0 @@ -1533,33 +1802,508 @@ ; X64-AVX2-NEXT: vzeroupper ; X64-AVX2-NEXT: retq ; -; X64-AVX512F-LABEL: length64_eq: +; X64-AVX512F-LABEL: length63_eq: ; X64-AVX512F: # %bb.0: -; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 -; X64-AVX512F-NEXT: vpcmpeqd (%rsi), %zmm0, %k0 -; X64-AVX512F-NEXT: kortestw %k0, %k0 -; X64-AVX512F-NEXT: setae %al +; X64-AVX512F-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX512F-NEXT: vmovdqu 31(%rdi), %ymm1 +; X64-AVX512F-NEXT: vpxor 31(%rsi), %ymm1, %ymm1 +; 
X64-AVX512F-NEXT: vpxor (%rsi), %ymm0, %ymm0 +; X64-AVX512F-NEXT: vpor %ymm1, %ymm0, %ymm0 +; X64-AVX512F-NEXT: vptest %ymm0, %ymm0 +; X64-AVX512F-NEXT: setne %al ; X64-AVX512F-NEXT: vzeroupper ; X64-AVX512F-NEXT: retq ; -; X64-AVX512BW-LABEL: length64_eq: +; X64-AVX512BW-LABEL: length63_eq: ; X64-AVX512BW: # %bb.0: -; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0 -; X64-AVX512BW-NEXT: vpcmpeqb (%rsi), %zmm0, %k0 -; X64-AVX512BW-NEXT: kortestq %k0, %k0 -; X64-AVX512BW-NEXT: setae %al +; X64-AVX512BW-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX512BW-NEXT: vmovdqu 31(%rdi), %ymm1 +; X64-AVX512BW-NEXT: vpxor 31(%rsi), %ymm1, %ymm1 +; X64-AVX512BW-NEXT: vpxor (%rsi), %ymm0, %ymm0 +; X64-AVX512BW-NEXT: vpor %ymm1, %ymm0, %ymm0 +; X64-AVX512BW-NEXT: vptest %ymm0, %ymm0 +; X64-AVX512BW-NEXT: setne %al ; X64-AVX512BW-NEXT: vzeroupper ; X64-AVX512BW-NEXT: retq - %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 64) nounwind + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 63) nounwind %cmp = icmp ne i32 %call, 0 ret i1 %cmp } -define i1 @length64_eq_const(i8* %X) nounwind { -; X86-LABEL: length64_eq_const: -; X86: # %bb.0: -; X86-NEXT: pushl $0 -; X86-NEXT: pushl $64 +define i1 @length63_eq_const(i8* %X) nounwind { +; X86-NOSSE-LABEL: length63_eq_const: +; X86-NOSSE: # %bb.0: +; X86-NOSSE-NEXT: pushl $0 +; X86-NOSSE-NEXT: pushl $63 +; X86-NOSSE-NEXT: pushl $.L.str +; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NOSSE-NEXT: calll memcmp +; X86-NOSSE-NEXT: addl $16, %esp +; X86-NOSSE-NEXT: testl %eax, %eax +; X86-NOSSE-NEXT: sete %al +; X86-NOSSE-NEXT: retl +; +; X86-SSE1-LABEL: length63_eq_const: +; X86-SSE1: # %bb.0: +; X86-SSE1-NEXT: pushl $0 +; X86-SSE1-NEXT: pushl $63 +; X86-SSE1-NEXT: pushl $.L.str +; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp) +; X86-SSE1-NEXT: calll memcmp +; X86-SSE1-NEXT: addl $16, %esp +; X86-SSE1-NEXT: testl %eax, %eax +; X86-SSE1-NEXT: sete %al +; X86-SSE1-NEXT: retl +; +; X86-SSE2-LABEL: length63_eq_const: +; X86-SSE2: # %bb.0: +; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-SSE2-NEXT: movdqu (%eax), %xmm0 +; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1 +; X86-SSE2-NEXT: movdqu 32(%eax), %xmm2 +; X86-SSE2-NEXT: movdqu 47(%eax), %xmm3 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm3 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm2 +; X86-SSE2-NEXT: pand %xmm3, %xmm2 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: pand %xmm2, %xmm1 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand %xmm1, %xmm0 +; X86-SSE2-NEXT: pmovmskb %xmm0, %eax +; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF +; X86-SSE2-NEXT: sete %al +; X86-SSE2-NEXT: retl +; +; X64-SSE2-LABEL: length63_eq_const: +; X64-SSE2: # %bb.0: +; X64-SSE2-NEXT: movdqu (%rdi), %xmm0 +; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1 +; X64-SSE2-NEXT: movdqu 32(%rdi), %xmm2 +; X64-SSE2-NEXT: movdqu 47(%rdi), %xmm3 +; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm3 +; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm2 +; X64-SSE2-NEXT: pand %xmm3, %xmm2 +; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm1 +; X64-SSE2-NEXT: pand %xmm2, %xmm1 +; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0 +; X64-SSE2-NEXT: pand %xmm1, %xmm0 +; X64-SSE2-NEXT: pmovmskb %xmm0, %eax +; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF +; X64-SSE2-NEXT: sete %al +; X64-SSE2-NEXT: retq +; +; X64-AVX1-LABEL: length63_eq_const: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: vmovdqu (%rdi), %xmm0 +; X64-AVX1-NEXT: vmovdqu 16(%rdi), %xmm1 +; X64-AVX1-NEXT: vmovdqu 32(%rdi), %xmm2 +; X64-AVX1-NEXT: vmovdqu 47(%rdi), %xmm3 +; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm3, %xmm3 +; 
X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm2, %xmm2 +; X64-AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2 +; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm1 +; X64-AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1 +; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0 +; X64-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 +; X64-AVX1-NEXT: vptest %xmm0, %xmm0 +; X64-AVX1-NEXT: sete %al +; X64-AVX1-NEXT: retq +; +; X64-AVX2-LABEL: length63_eq_const: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX2-NEXT: vmovdqu 31(%rdi), %ymm1 +; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm1, %ymm1 +; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0 +; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 +; X64-AVX2-NEXT: vptest %ymm0, %ymm0 +; X64-AVX2-NEXT: sete %al +; X64-AVX2-NEXT: vzeroupper +; X64-AVX2-NEXT: retq +; +; X64-AVX512F-LABEL: length63_eq_const: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX512F-NEXT: vmovdqu 31(%rdi), %ymm1 +; X64-AVX512F-NEXT: vpxor {{.*}}(%rip), %ymm1, %ymm1 +; X64-AVX512F-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0 +; X64-AVX512F-NEXT: vpor %ymm1, %ymm0, %ymm0 +; X64-AVX512F-NEXT: vptest %ymm0, %ymm0 +; X64-AVX512F-NEXT: sete %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-AVX512BW-LABEL: length63_eq_const: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX512BW-NEXT: vmovdqu 31(%rdi), %ymm1 +; X64-AVX512BW-NEXT: vpxor {{.*}}(%rip), %ymm1, %ymm1 +; X64-AVX512BW-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0 +; X64-AVX512BW-NEXT: vpor %ymm1, %ymm0, %ymm0 +; X64-AVX512BW-NEXT: vptest %ymm0, %ymm0 +; X64-AVX512BW-NEXT: sete %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 63) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i32 @length64(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length64: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $64 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: retl +; +; X64-LABEL: length64: +; X64: # %bb.0: +; X64-NEXT: movl $64, %edx +; X64-NEXT: jmp memcmp # TAILCALL + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 64) nounwind + ret i32 %m +} + +define i1 @length64_eq(i8* %x, i8* %y) nounwind { +; X86-NOSSE-LABEL: length64_eq: +; X86-NOSSE: # %bb.0: +; X86-NOSSE-NEXT: pushl $0 +; X86-NOSSE-NEXT: pushl $64 +; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NOSSE-NEXT: calll memcmp +; X86-NOSSE-NEXT: addl $16, %esp +; X86-NOSSE-NEXT: testl %eax, %eax +; X86-NOSSE-NEXT: setne %al +; X86-NOSSE-NEXT: retl +; +; X86-SSE1-LABEL: length64_eq: +; X86-SSE1: # %bb.0: +; X86-SSE1-NEXT: pushl $0 +; X86-SSE1-NEXT: pushl $64 +; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp) +; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp) +; X86-SSE1-NEXT: calll memcmp +; X86-SSE1-NEXT: addl $16, %esp +; X86-SSE1-NEXT: testl %eax, %eax +; X86-SSE1-NEXT: setne %al +; X86-SSE1-NEXT: retl +; +; X86-SSE2-LABEL: length64_eq: +; X86-SSE2: # %bb.0: +; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-SSE2-NEXT: movdqu (%ecx), %xmm0 +; X86-SSE2-NEXT: movdqu 16(%ecx), %xmm1 +; X86-SSE2-NEXT: movdqu (%eax), %xmm2 +; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm2 +; X86-SSE2-NEXT: movdqu 16(%eax), %xmm0 +; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm0 +; X86-SSE2-NEXT: movdqu 32(%ecx), %xmm1 +; X86-SSE2-NEXT: movdqu 32(%eax), %xmm3 +; X86-SSE2-NEXT: pcmpeqb %xmm1, 
%xmm3 +; X86-SSE2-NEXT: movdqu 48(%ecx), %xmm1 +; X86-SSE2-NEXT: movdqu 48(%eax), %xmm4 +; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm4 +; X86-SSE2-NEXT: pand %xmm3, %xmm4 +; X86-SSE2-NEXT: pand %xmm0, %xmm4 +; X86-SSE2-NEXT: pand %xmm2, %xmm4 +; X86-SSE2-NEXT: pmovmskb %xmm4, %eax +; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF +; X86-SSE2-NEXT: setne %al +; X86-SSE2-NEXT: retl +; +; X64-SSE2-LABEL: length64_eq: +; X64-SSE2: # %bb.0: +; X64-SSE2-NEXT: movdqu (%rdi), %xmm0 +; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1 +; X64-SSE2-NEXT: movdqu 32(%rdi), %xmm2 +; X64-SSE2-NEXT: movdqu 48(%rdi), %xmm3 +; X64-SSE2-NEXT: movdqu (%rsi), %xmm4 +; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm4 +; X64-SSE2-NEXT: movdqu 16(%rsi), %xmm0 +; X64-SSE2-NEXT: pcmpeqb %xmm1, %xmm0 +; X64-SSE2-NEXT: movdqu 32(%rsi), %xmm1 +; X64-SSE2-NEXT: pcmpeqb %xmm2, %xmm1 +; X64-SSE2-NEXT: movdqu 48(%rsi), %xmm2 +; X64-SSE2-NEXT: pcmpeqb %xmm3, %xmm2 +; X64-SSE2-NEXT: pand %xmm1, %xmm2 +; X64-SSE2-NEXT: pand %xmm0, %xmm2 +; X64-SSE2-NEXT: pand %xmm4, %xmm2 +; X64-SSE2-NEXT: pmovmskb %xmm2, %eax +; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF +; X64-SSE2-NEXT: setne %al +; X64-SSE2-NEXT: retq +; +; X64-AVX1-LABEL: length64_eq: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: vmovdqu (%rdi), %xmm0 +; X64-AVX1-NEXT: vmovdqu 16(%rdi), %xmm1 +; X64-AVX1-NEXT: vmovdqu 32(%rdi), %xmm2 +; X64-AVX1-NEXT: vmovdqu 48(%rdi), %xmm3 +; X64-AVX1-NEXT: vpxor 48(%rsi), %xmm3, %xmm3 +; X64-AVX1-NEXT: vpxor 32(%rsi), %xmm2, %xmm2 +; X64-AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2 +; X64-AVX1-NEXT: vpxor 16(%rsi), %xmm1, %xmm1 +; X64-AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1 +; X64-AVX1-NEXT: vpxor (%rsi), %xmm0, %xmm0 +; X64-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 +; X64-AVX1-NEXT: vptest %xmm0, %xmm0 +; X64-AVX1-NEXT: setne %al +; X64-AVX1-NEXT: retq +; +; X64-AVX2-LABEL: length64_eq: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-AVX2-NEXT: vpxor 32(%rsi), %ymm1, %ymm1 +; X64-AVX2-NEXT: vpxor (%rsi), %ymm0, %ymm0 +; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 +; X64-AVX2-NEXT: vptest %ymm0, %ymm0 +; X64-AVX2-NEXT: setne %al +; X64-AVX2-NEXT: vzeroupper +; X64-AVX2-NEXT: retq +; +; X64-AVX512F-LABEL: length64_eq: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512F-NEXT: vpcmpeqd (%rsi), %zmm0, %k0 +; X64-AVX512F-NEXT: kortestw %k0, %k0 +; X64-AVX512F-NEXT: setae %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-AVX512BW-LABEL: length64_eq: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512BW-NEXT: vpcmpeqb (%rsi), %zmm0, %k0 +; X64-AVX512BW-NEXT: kortestq %k0, %k0 +; X64-AVX512BW-NEXT: setae %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 64) nounwind + %cmp = icmp ne i32 %call, 0 + ret i1 %cmp +} + +define i1 @length64_eq_const(i8* %X) nounwind { +; X86-NOSSE-LABEL: length64_eq_const: +; X86-NOSSE: # %bb.0: +; X86-NOSSE-NEXT: pushl $0 +; X86-NOSSE-NEXT: pushl $64 +; X86-NOSSE-NEXT: pushl $.L.str +; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NOSSE-NEXT: calll memcmp +; X86-NOSSE-NEXT: addl $16, %esp +; X86-NOSSE-NEXT: testl %eax, %eax +; X86-NOSSE-NEXT: sete %al +; X86-NOSSE-NEXT: retl +; +; X86-SSE1-LABEL: length64_eq_const: +; X86-SSE1: # %bb.0: +; X86-SSE1-NEXT: pushl $0 +; X86-SSE1-NEXT: pushl $64 +; X86-SSE1-NEXT: pushl $.L.str +; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp) +; X86-SSE1-NEXT: calll memcmp +; X86-SSE1-NEXT: addl $16, %esp +; X86-SSE1-NEXT: testl %eax, 
%eax +; X86-SSE1-NEXT: sete %al +; X86-SSE1-NEXT: retl +; +; X86-SSE2-LABEL: length64_eq_const: +; X86-SSE2: # %bb.0: +; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-SSE2-NEXT: movdqu (%eax), %xmm0 +; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1 +; X86-SSE2-NEXT: movdqu 32(%eax), %xmm2 +; X86-SSE2-NEXT: movdqu 48(%eax), %xmm3 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm3 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm2 +; X86-SSE2-NEXT: pand %xmm3, %xmm2 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: pand %xmm2, %xmm1 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand %xmm1, %xmm0 +; X86-SSE2-NEXT: pmovmskb %xmm0, %eax +; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF +; X86-SSE2-NEXT: sete %al +; X86-SSE2-NEXT: retl +; +; X64-SSE2-LABEL: length64_eq_const: +; X64-SSE2: # %bb.0: +; X64-SSE2-NEXT: movdqu (%rdi), %xmm0 +; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1 +; X64-SSE2-NEXT: movdqu 32(%rdi), %xmm2 +; X64-SSE2-NEXT: movdqu 48(%rdi), %xmm3 +; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm3 +; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm2 +; X64-SSE2-NEXT: pand %xmm3, %xmm2 +; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm1 +; X64-SSE2-NEXT: pand %xmm2, %xmm1 +; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0 +; X64-SSE2-NEXT: pand %xmm1, %xmm0 +; X64-SSE2-NEXT: pmovmskb %xmm0, %eax +; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF +; X64-SSE2-NEXT: sete %al +; X64-SSE2-NEXT: retq +; +; X64-AVX1-LABEL: length64_eq_const: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: vmovdqu (%rdi), %xmm0 +; X64-AVX1-NEXT: vmovdqu 16(%rdi), %xmm1 +; X64-AVX1-NEXT: vmovdqu 32(%rdi), %xmm2 +; X64-AVX1-NEXT: vmovdqu 48(%rdi), %xmm3 +; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm3, %xmm3 +; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm2, %xmm2 +; X64-AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2 +; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm1 +; X64-AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1 +; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0 +; X64-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 +; X64-AVX1-NEXT: vptest %xmm0, %xmm0 +; X64-AVX1-NEXT: sete %al +; X64-AVX1-NEXT: retq +; +; X64-AVX2-LABEL: length64_eq_const: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm1, %ymm1 +; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0 +; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 +; X64-AVX2-NEXT: vptest %ymm0, %ymm0 +; X64-AVX2-NEXT: sete %al +; X64-AVX2-NEXT: vzeroupper +; X64-AVX2-NEXT: retq +; +; X64-AVX512F-LABEL: length64_eq_const: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512F-NEXT: vpcmpeqd {{.*}}(%rip), %zmm0, %k0 +; X64-AVX512F-NEXT: kortestw %k0, %k0 +; X64-AVX512F-NEXT: setb %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-AVX512BW-LABEL: length64_eq_const: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512BW-NEXT: vpcmpeqb {{.*}}(%rip), %zmm0, %k0 +; X64-AVX512BW-NEXT: kortestq %k0, %k0 +; X64-AVX512BW-NEXT: setb %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 64) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i32 @length128(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length128: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $128 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: retl +; +; X64-LABEL: length128: +; 
X64: # %bb.0: +; X64-NEXT: movl $128, %edx +; X64-NEXT: jmp memcmp # TAILCALL + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 128) nounwind + ret i32 %m +} + +define i1 @length128_eq(i8* %x, i8* %y) nounwind { +; X86-LABEL: length128_eq: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $128 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setne %al +; X86-NEXT: retl +; +; X64-SSE2-LABEL: length128_eq: +; X64-SSE2: # %bb.0: +; X64-SSE2-NEXT: pushq %rax +; X64-SSE2-NEXT: movl $128, %edx +; X64-SSE2-NEXT: callq memcmp +; X64-SSE2-NEXT: testl %eax, %eax +; X64-SSE2-NEXT: setne %al +; X64-SSE2-NEXT: popq %rcx +; X64-SSE2-NEXT: retq +; +; X64-AVX1-LABEL: length128_eq: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: pushq %rax +; X64-AVX1-NEXT: movl $128, %edx +; X64-AVX1-NEXT: callq memcmp +; X64-AVX1-NEXT: testl %eax, %eax +; X64-AVX1-NEXT: setne %al +; X64-AVX1-NEXT: popq %rcx +; X64-AVX1-NEXT: retq +; +; X64-AVX2-LABEL: length128_eq: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-AVX2-NEXT: vmovdqu 64(%rdi), %ymm2 +; X64-AVX2-NEXT: vmovdqu 96(%rdi), %ymm3 +; X64-AVX2-NEXT: vpxor 96(%rsi), %ymm3, %ymm3 +; X64-AVX2-NEXT: vpxor 64(%rsi), %ymm2, %ymm2 +; X64-AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2 +; X64-AVX2-NEXT: vpxor 32(%rsi), %ymm1, %ymm1 +; X64-AVX2-NEXT: vpor %ymm2, %ymm1, %ymm1 +; X64-AVX2-NEXT: vpxor (%rsi), %ymm0, %ymm0 +; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 +; X64-AVX2-NEXT: vptest %ymm0, %ymm0 +; X64-AVX2-NEXT: setne %al +; X64-AVX2-NEXT: vzeroupper +; X64-AVX2-NEXT: retq +; +; X64-AVX512F-LABEL: length128_eq: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-AVX512F-NEXT: vpcmpeqd (%rsi), %zmm0, %k1 +; X64-AVX512F-NEXT: vpcmpeqd 64(%rsi), %zmm1, %k0 {%k1} +; X64-AVX512F-NEXT: kortestw %k0, %k0 +; X64-AVX512F-NEXT: setae %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-AVX512BW-LABEL: length128_eq: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512BW-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-AVX512BW-NEXT: vpcmpeqb (%rsi), %zmm0, %k1 +; X64-AVX512BW-NEXT: vpcmpeqb 64(%rsi), %zmm1, %k0 {%k1} +; X64-AVX512BW-NEXT: kortestq %k0, %k0 +; X64-AVX512BW-NEXT: setae %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 128) nounwind + %cmp = icmp ne i32 %call, 0 + ret i1 %cmp +} + +define i1 @length128_eq_const(i8* %X) nounwind { +; X86-LABEL: length128_eq_const: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $128 ; X86-NEXT: pushl $.L.str ; X86-NEXT: pushl {{[0-9]+}}(%esp) ; X86-NEXT: calll memcmp @@ -1568,33 +2312,39 @@ ; X86-NEXT: sete %al ; X86-NEXT: retl ; -; X64-SSE2-LABEL: length64_eq_const: +; X64-SSE2-LABEL: length128_eq_const: ; X64-SSE2: # %bb.0: ; X64-SSE2-NEXT: pushq %rax ; X64-SSE2-NEXT: movl $.L.str, %esi -; X64-SSE2-NEXT: movl $64, %edx +; X64-SSE2-NEXT: movl $128, %edx ; X64-SSE2-NEXT: callq memcmp ; X64-SSE2-NEXT: testl %eax, %eax ; X64-SSE2-NEXT: sete %al ; X64-SSE2-NEXT: popq %rcx ; X64-SSE2-NEXT: retq ; -; X64-AVX1-LABEL: length64_eq_const: +; X64-AVX1-LABEL: length128_eq_const: ; X64-AVX1: # %bb.0: ; X64-AVX1-NEXT: pushq %rax ; X64-AVX1-NEXT: movl $.L.str, %esi -; X64-AVX1-NEXT: movl $64, %edx +; X64-AVX1-NEXT: movl $128, %edx ; X64-AVX1-NEXT: callq memcmp ; X64-AVX1-NEXT: testl %eax, %eax ; 
X64-AVX1-NEXT: sete %al ; X64-AVX1-NEXT: popq %rcx ; X64-AVX1-NEXT: retq ; -; X64-AVX2-LABEL: length64_eq_const: +; X64-AVX2-LABEL: length128_eq_const: ; X64-AVX2: # %bb.0: ; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0 ; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-AVX2-NEXT: vmovdqu 64(%rdi), %ymm2 +; X64-AVX2-NEXT: vmovdqu 96(%rdi), %ymm3 +; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm3, %ymm3 +; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm2, %ymm2 +; X64-AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2 ; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm1, %ymm1 +; X64-AVX2-NEXT: vpor %ymm2, %ymm1, %ymm1 ; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0 ; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 ; X64-AVX2-NEXT: vptest %ymm0, %ymm0 @@ -1602,24 +2352,420 @@ ; X64-AVX2-NEXT: vzeroupper ; X64-AVX2-NEXT: retq ; -; X64-AVX512F-LABEL: length64_eq_const: +; X64-AVX512F-LABEL: length128_eq_const: ; X64-AVX512F: # %bb.0: ; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 -; X64-AVX512F-NEXT: vpcmpeqd {{.*}}(%rip), %zmm0, %k0 +; X64-AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-AVX512F-NEXT: vpcmpeqd {{.*}}(%rip), %zmm0, %k1 +; X64-AVX512F-NEXT: vpcmpeqd .L.str+{{.*}}(%rip), %zmm1, %k0 {%k1} ; X64-AVX512F-NEXT: kortestw %k0, %k0 ; X64-AVX512F-NEXT: setb %al ; X64-AVX512F-NEXT: vzeroupper ; X64-AVX512F-NEXT: retq ; -; X64-AVX512BW-LABEL: length64_eq_const: +; X64-AVX512BW-LABEL: length128_eq_const: ; X64-AVX512BW: # %bb.0: ; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0 -; X64-AVX512BW-NEXT: vpcmpeqb {{.*}}(%rip), %zmm0, %k0 +; X64-AVX512BW-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-AVX512BW-NEXT: vpcmpeqb {{.*}}(%rip), %zmm0, %k1 +; X64-AVX512BW-NEXT: vpcmpeqb .L.str+{{.*}}(%rip), %zmm1, %k0 {%k1} +; X64-AVX512BW-NEXT: kortestq %k0, %k0 +; X64-AVX512BW-NEXT: setb %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 128) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i32 @length255(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length255: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $255 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: retl +; +; X64-LABEL: length255: +; X64: # %bb.0: +; X64-NEXT: movl $255, %edx +; X64-NEXT: jmp memcmp # TAILCALL + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 255) nounwind + ret i32 %m +} + +define i1 @length255_eq(i8* %x, i8* %y) nounwind { +; X86-LABEL: length255_eq: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $255 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setne %al +; X86-NEXT: retl +; +; X64-SSE2-LABEL: length255_eq: +; X64-SSE2: # %bb.0: +; X64-SSE2-NEXT: pushq %rax +; X64-SSE2-NEXT: movl $255, %edx +; X64-SSE2-NEXT: callq memcmp +; X64-SSE2-NEXT: testl %eax, %eax +; X64-SSE2-NEXT: setne %al +; X64-SSE2-NEXT: popq %rcx +; X64-SSE2-NEXT: retq +; +; X64-AVX-LABEL: length255_eq: +; X64-AVX: # %bb.0: +; X64-AVX-NEXT: pushq %rax +; X64-AVX-NEXT: movl $255, %edx +; X64-AVX-NEXT: callq memcmp +; X64-AVX-NEXT: testl %eax, %eax +; X64-AVX-NEXT: setne %al +; X64-AVX-NEXT: popq %rcx +; X64-AVX-NEXT: retq +; +; X64-AVX512F-LABEL: length255_eq: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu64 -128(%rdi), %zmm0 +; X64-AVX512F-NEXT: vmovdqu64 -65(%rdi), %zmm1 +; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm2 +; X64-AVX512F-NEXT: vmovdqu64 64(%rdi), 
%zmm3 +; X64-AVX512F-NEXT: vpcmpeqd -128(%rsi), %zmm0, %k1 +; X64-AVX512F-NEXT: vpcmpeqd -65(%rsi), %zmm1, %k0 {%k1} +; X64-AVX512F-NEXT: vpcmpeqd (%rsi), %zmm2, %k1 +; X64-AVX512F-NEXT: vpcmpeqd 64(%rsi), %zmm3, %k1 {%k1} +; X64-AVX512F-NEXT: kandw %k0, %k1, %k0 +; X64-AVX512F-NEXT: kortestw %k0, %k0 +; X64-AVX512F-NEXT: setae %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-AVX512BW-LABEL: length255_eq: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu64 -128(%rdi), %zmm0 +; X64-AVX512BW-NEXT: vmovdqu64 -65(%rdi), %zmm1 +; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm2 +; X64-AVX512BW-NEXT: vmovdqu64 64(%rdi), %zmm3 +; X64-AVX512BW-NEXT: vpcmpeqb -128(%rsi), %zmm0, %k1 +; X64-AVX512BW-NEXT: vpcmpeqb -65(%rsi), %zmm1, %k0 {%k1} +; X64-AVX512BW-NEXT: vpcmpeqb (%rsi), %zmm2, %k1 +; X64-AVX512BW-NEXT: vpcmpeqb 64(%rsi), %zmm3, %k1 {%k1} +; X64-AVX512BW-NEXT: kandq %k0, %k1, %k0 +; X64-AVX512BW-NEXT: kortestq %k0, %k0 +; X64-AVX512BW-NEXT: setae %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 255) nounwind + %cmp = icmp ne i32 %call, 0 + ret i1 %cmp +} + +define i1 @length255_eq_const(i8* %X) nounwind { +; X86-LABEL: length255_eq_const: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $255 +; X86-NEXT: pushl $.L.str +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: sete %al +; X86-NEXT: retl +; +; X64-SSE2-LABEL: length255_eq_const: +; X64-SSE2: # %bb.0: +; X64-SSE2-NEXT: pushq %rax +; X64-SSE2-NEXT: movl $.L.str, %esi +; X64-SSE2-NEXT: movl $255, %edx +; X64-SSE2-NEXT: callq memcmp +; X64-SSE2-NEXT: testl %eax, %eax +; X64-SSE2-NEXT: sete %al +; X64-SSE2-NEXT: popq %rcx +; X64-SSE2-NEXT: retq +; +; X64-AVX-LABEL: length255_eq_const: +; X64-AVX: # %bb.0: +; X64-AVX-NEXT: pushq %rax +; X64-AVX-NEXT: movl $.L.str, %esi +; X64-AVX-NEXT: movl $255, %edx +; X64-AVX-NEXT: callq memcmp +; X64-AVX-NEXT: testl %eax, %eax +; X64-AVX-NEXT: sete %al +; X64-AVX-NEXT: popq %rcx +; X64-AVX-NEXT: retq +; +; X64-AVX512F-LABEL: length255_eq_const: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu64 -128(%rdi), %zmm0 +; X64-AVX512F-NEXT: vmovdqu64 -65(%rdi), %zmm1 +; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm2 +; X64-AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm3 +; X64-AVX512F-NEXT: vpcmpeqd .L.str-{{.*}}(%rip), %zmm0, %k1 +; X64-AVX512F-NEXT: vpcmpeqd .L.str-{{.*}}(%rip), %zmm1, %k0 {%k1} +; X64-AVX512F-NEXT: vpcmpeqd {{.*}}(%rip), %zmm2, %k1 +; X64-AVX512F-NEXT: vpcmpeqd .L.str+{{.*}}(%rip), %zmm3, %k1 {%k1} +; X64-AVX512F-NEXT: kandw %k0, %k1, %k0 +; X64-AVX512F-NEXT: kortestw %k0, %k0 +; X64-AVX512F-NEXT: setb %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-AVX512BW-LABEL: length255_eq_const: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu64 -128(%rdi), %zmm0 +; X64-AVX512BW-NEXT: vmovdqu64 -65(%rdi), %zmm1 +; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm2 +; X64-AVX512BW-NEXT: vmovdqu64 64(%rdi), %zmm3 +; X64-AVX512BW-NEXT: vpcmpeqb .L.str-{{.*}}(%rip), %zmm0, %k1 +; X64-AVX512BW-NEXT: vpcmpeqb .L.str-{{.*}}(%rip), %zmm1, %k0 {%k1} +; X64-AVX512BW-NEXT: vpcmpeqb {{.*}}(%rip), %zmm2, %k1 +; X64-AVX512BW-NEXT: vpcmpeqb .L.str+{{.*}}(%rip), %zmm3, %k1 {%k1} +; X64-AVX512BW-NEXT: kandq %k0, %k1, %k0 +; X64-AVX512BW-NEXT: kortestq %k0, %k0 +; X64-AVX512BW-NEXT: setb %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], 
+define i1 @length255_eq_const(i8* %X) nounwind {
+; X86-LABEL: length255_eq_const:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl $0
+; X86-NEXT:    pushl $255
+; X86-NEXT:    pushl $.L.str
+; X86-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NEXT:    calll memcmp
+; X86-NEXT:    addl $16, %esp
+; X86-NEXT:    testl %eax, %eax
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
+;
+; X64-SSE2-LABEL: length255_eq_const:
+; X64-SSE2:       # %bb.0:
+; X64-SSE2-NEXT:    pushq %rax
+; X64-SSE2-NEXT:    movl $.L.str, %esi
+; X64-SSE2-NEXT:    movl $255, %edx
+; X64-SSE2-NEXT:    callq memcmp
+; X64-SSE2-NEXT:    testl %eax, %eax
+; X64-SSE2-NEXT:    sete %al
+; X64-SSE2-NEXT:    popq %rcx
+; X64-SSE2-NEXT:    retq
+;
+; X64-AVX-LABEL: length255_eq_const:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    pushq %rax
+; X64-AVX-NEXT:    movl $.L.str, %esi
+; X64-AVX-NEXT:    movl $255, %edx
+; X64-AVX-NEXT:    callq memcmp
+; X64-AVX-NEXT:    testl %eax, %eax
+; X64-AVX-NEXT:    sete %al
+; X64-AVX-NEXT:    popq %rcx
+; X64-AVX-NEXT:    retq
+;
+; X64-AVX512F-LABEL: length255_eq_const:
+; X64-AVX512F:       # %bb.0:
+; X64-AVX512F-NEXT:    vmovdqu64 -128(%rdi), %zmm0
+; X64-AVX512F-NEXT:    vmovdqu64 -65(%rdi), %zmm1
+; X64-AVX512F-NEXT:    vmovdqu64 (%rdi), %zmm2
+; X64-AVX512F-NEXT:    vmovdqu64 64(%rdi), %zmm3
+; X64-AVX512F-NEXT:    vpcmpeqd .L.str-{{.*}}(%rip), %zmm0, %k1
+; X64-AVX512F-NEXT:    vpcmpeqd .L.str-{{.*}}(%rip), %zmm1, %k0 {%k1}
+; X64-AVX512F-NEXT:    vpcmpeqd {{.*}}(%rip), %zmm2, %k1
+; X64-AVX512F-NEXT:    vpcmpeqd .L.str+{{.*}}(%rip), %zmm3, %k1 {%k1}
+; X64-AVX512F-NEXT:    kandw %k0, %k1, %k0
+; X64-AVX512F-NEXT:    kortestw %k0, %k0
+; X64-AVX512F-NEXT:    setb %al
+; X64-AVX512F-NEXT:    vzeroupper
+; X64-AVX512F-NEXT:    retq
+;
+; X64-AVX512BW-LABEL: length255_eq_const:
+; X64-AVX512BW:       # %bb.0:
+; X64-AVX512BW-NEXT:    vmovdqu64 -128(%rdi), %zmm0
+; X64-AVX512BW-NEXT:    vmovdqu64 -65(%rdi), %zmm1
+; X64-AVX512BW-NEXT:    vmovdqu64 (%rdi), %zmm2
+; X64-AVX512BW-NEXT:    vmovdqu64 64(%rdi), %zmm3
+; X64-AVX512BW-NEXT:    vpcmpeqb .L.str-{{.*}}(%rip), %zmm0, %k1
+; X64-AVX512BW-NEXT:    vpcmpeqb .L.str-{{.*}}(%rip), %zmm1, %k0 {%k1}
+; X64-AVX512BW-NEXT:    vpcmpeqb {{.*}}(%rip), %zmm2, %k1
+; X64-AVX512BW-NEXT:    vpcmpeqb .L.str+{{.*}}(%rip), %zmm3, %k1 {%k1}
+; X64-AVX512BW-NEXT:    kandq %k0, %k1, %k0
+; X64-AVX512BW-NEXT:    kortestq %k0, %k0
+; X64-AVX512BW-NEXT:    setb %al
+; X64-AVX512BW-NEXT:    vzeroupper
+; X64-AVX512BW-NEXT:    retq
+  %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 255) nounwind
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
+define i32 @length256(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length256:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl $0
+; X86-NEXT:    pushl $256 # imm = 0x100
+; X86-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NEXT:    calll memcmp
+; X86-NEXT:    addl $16, %esp
+; X86-NEXT:    retl
+;
+; X64-LABEL: length256:
+; X64:       # %bb.0:
+; X64-NEXT:    movl $256, %edx # imm = 0x100
+; X64-NEXT:    jmp memcmp # TAILCALL
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 256) nounwind
+  ret i32 %m
+}
+
+define i1 @length256_eq(i8* %x, i8* %y) nounwind {
+; X86-LABEL: length256_eq:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl $0
+; X86-NEXT:    pushl $256 # imm = 0x100
+; X86-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NEXT:    calll memcmp
+; X86-NEXT:    addl $16, %esp
+; X86-NEXT:    testl %eax, %eax
+; X86-NEXT:    setne %al
+; X86-NEXT:    retl
+;
+; X64-SSE2-LABEL: length256_eq:
+; X64-SSE2:       # %bb.0:
+; X64-SSE2-NEXT:    pushq %rax
+; X64-SSE2-NEXT:    movl $256, %edx # imm = 0x100
+; X64-SSE2-NEXT:    callq memcmp
+; X64-SSE2-NEXT:    testl %eax, %eax
+; X64-SSE2-NEXT:    setne %al
+; X64-SSE2-NEXT:    popq %rcx
+; X64-SSE2-NEXT:    retq
+;
+; X64-AVX-LABEL: length256_eq:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    pushq %rax
+; X64-AVX-NEXT:    movl $256, %edx # imm = 0x100
+; X64-AVX-NEXT:    callq memcmp
+; X64-AVX-NEXT:    testl %eax, %eax
+; X64-AVX-NEXT:    setne %al
+; X64-AVX-NEXT:    popq %rcx
+; X64-AVX-NEXT:    retq
+;
+; X64-AVX512F-LABEL: length256_eq:
+; X64-AVX512F:       # %bb.0:
+; X64-AVX512F-NEXT:    vmovdqu64 -128(%rdi), %zmm0
+; X64-AVX512F-NEXT:    vmovdqu64 -64(%rdi), %zmm1
+; X64-AVX512F-NEXT:    vmovdqu64 (%rdi), %zmm2
+; X64-AVX512F-NEXT:    vmovdqu64 64(%rdi), %zmm3
+; X64-AVX512F-NEXT:    vpcmpeqd -128(%rsi), %zmm0, %k1
+; X64-AVX512F-NEXT:    vpcmpeqd -64(%rsi), %zmm1, %k0 {%k1}
+; X64-AVX512F-NEXT:    vpcmpeqd (%rsi), %zmm2, %k1
+; X64-AVX512F-NEXT:    vpcmpeqd 64(%rsi), %zmm3, %k1 {%k1}
+; X64-AVX512F-NEXT:    kandw %k0, %k1, %k0
+; X64-AVX512F-NEXT:    kortestw %k0, %k0
+; X64-AVX512F-NEXT:    setae %al
+; X64-AVX512F-NEXT:    vzeroupper
+; X64-AVX512F-NEXT:    retq
+;
+; X64-AVX512BW-LABEL: length256_eq:
+; X64-AVX512BW:       # %bb.0:
+; X64-AVX512BW-NEXT:    vmovdqu64 -128(%rdi), %zmm0
+; X64-AVX512BW-NEXT:    vmovdqu64 -64(%rdi), %zmm1
+; X64-AVX512BW-NEXT:    vmovdqu64 (%rdi), %zmm2
+; X64-AVX512BW-NEXT:    vmovdqu64 64(%rdi), %zmm3
+; X64-AVX512BW-NEXT:    vpcmpeqb -128(%rsi), %zmm0, %k1
+; X64-AVX512BW-NEXT:    vpcmpeqb -64(%rsi), %zmm1, %k0 {%k1}
+; X64-AVX512BW-NEXT:    vpcmpeqb (%rsi), %zmm2, %k1
+; X64-AVX512BW-NEXT:    vpcmpeqb 64(%rsi), %zmm3, %k1 {%k1}
+; X64-AVX512BW-NEXT:    kandq %k0, %k1, %k0
+; X64-AVX512BW-NEXT:    kortestq %k0, %k0
+; X64-AVX512BW-NEXT:    setae %al
+; X64-AVX512BW-NEXT:    vzeroupper
+; X64-AVX512BW-NEXT:    retq
+  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 256) nounwind
+  %cmp = icmp ne i32 %call, 0
+  ret i1 %cmp
+}
+
+define i1 @length256_eq_const(i8* %X) nounwind {
+; X86-LABEL: length256_eq_const:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl $0
+; X86-NEXT:    pushl $256 # imm = 0x100
+; X86-NEXT:    pushl $.L.str
+; X86-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NEXT:    calll memcmp
+; X86-NEXT:    addl $16, %esp
+; X86-NEXT:    testl %eax, %eax
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
+;
+; X64-SSE2-LABEL: length256_eq_const:
+; X64-SSE2:       # %bb.0:
+; X64-SSE2-NEXT:    pushq %rax
+; X64-SSE2-NEXT:    movl $.L.str, %esi
+; X64-SSE2-NEXT:    movl $256, %edx # imm = 0x100
+; X64-SSE2-NEXT:    callq memcmp
+; X64-SSE2-NEXT:    testl %eax, %eax
+; X64-SSE2-NEXT:    sete %al
+; X64-SSE2-NEXT:    popq %rcx
+; X64-SSE2-NEXT:    retq
+;
+; X64-AVX-LABEL: length256_eq_const:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    pushq %rax
+; X64-AVX-NEXT:    movl $.L.str, %esi
+; X64-AVX-NEXT:    movl $256, %edx # imm = 0x100
+; X64-AVX-NEXT:    callq memcmp
+; X64-AVX-NEXT:    testl %eax, %eax
+; X64-AVX-NEXT:    sete %al
+; X64-AVX-NEXT:    popq %rcx
+; X64-AVX-NEXT:    retq
+;
+; X64-AVX512F-LABEL: length256_eq_const:
+; X64-AVX512F:       # %bb.0:
+; X64-AVX512F-NEXT:    vmovdqu64 -128(%rdi), %zmm0
+; X64-AVX512F-NEXT:    vmovdqu64 -64(%rdi), %zmm1
+; X64-AVX512F-NEXT:    vmovdqu64 (%rdi), %zmm2
+; X64-AVX512F-NEXT:    vmovdqu64 64(%rdi), %zmm3
+; X64-AVX512F-NEXT:    vpcmpeqd .L.str-{{.*}}(%rip), %zmm0, %k1
+; X64-AVX512F-NEXT:    vpcmpeqd .L.str-{{.*}}(%rip), %zmm1, %k0 {%k1}
+; X64-AVX512F-NEXT:    vpcmpeqd {{.*}}(%rip), %zmm2, %k1
+; X64-AVX512F-NEXT:    vpcmpeqd .L.str+{{.*}}(%rip), %zmm3, %k1 {%k1}
+; X64-AVX512F-NEXT:    kandw %k0, %k1, %k0
+; X64-AVX512F-NEXT:    kortestw %k0, %k0
+; X64-AVX512F-NEXT:    setb %al
+; X64-AVX512F-NEXT:    vzeroupper
+; X64-AVX512F-NEXT:    retq
+;
+; X64-AVX512BW-LABEL: length256_eq_const:
+; X64-AVX512BW:       # %bb.0:
+; X64-AVX512BW-NEXT:    vmovdqu64 -128(%rdi), %zmm0
+; X64-AVX512BW-NEXT:    vmovdqu64 -64(%rdi), %zmm1
+; X64-AVX512BW-NEXT:    vmovdqu64 (%rdi), %zmm2
+; X64-AVX512BW-NEXT:    vmovdqu64 64(%rdi), %zmm3
+; X64-AVX512BW-NEXT:    vpcmpeqb .L.str-{{.*}}(%rip), %zmm0, %k1
+; X64-AVX512BW-NEXT:    vpcmpeqb .L.str-{{.*}}(%rip), %zmm1, %k0 {%k1}
+; X64-AVX512BW-NEXT:    vpcmpeqb {{.*}}(%rip), %zmm2, %k1
+; X64-AVX512BW-NEXT:    vpcmpeqb .L.str+{{.*}}(%rip), %zmm3, %k1 {%k1}
+; X64-AVX512BW-NEXT:    kandq %k0, %k1, %k0
; X64-AVX512BW-NEXT:    kortestq %k0, %k0
; X64-AVX512BW-NEXT:    setb %al
; X64-AVX512BW-NEXT:    vzeroupper
; X64-AVX512BW-NEXT:    retq
-  %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 64) nounwind
+  %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 256) nounwind
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
+define i32 @length512(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length512:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl $0
+; X86-NEXT:    pushl $512 # imm = 0x200
+; X86-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NEXT:    calll memcmp
+; X86-NEXT:    addl $16, %esp
+; X86-NEXT:    retl
+;
+; X64-LABEL: length512:
+; X64:       # %bb.0:
+; X64-NEXT:    movl $512, %edx # imm = 0x200
+; X64-NEXT:    jmp memcmp # TAILCALL
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 512) nounwind
+  ret i32 %m
+}
+
+define i1 @length512_eq(i8* %x, i8* %y) nounwind {
+; X86-LABEL: length512_eq:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl $0
+; X86-NEXT:    pushl $512 # imm = 0x200
+; X86-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NEXT:    calll memcmp
+; X86-NEXT:    addl $16, %esp
+; X86-NEXT:    testl %eax, %eax
+; X86-NEXT:    setne %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: length512_eq:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rax
+; X64-NEXT:    movl $512, %edx # imm = 0x200
+; X64-NEXT:    callq memcmp
+; X64-NEXT:    testl %eax, %eax
+; X64-NEXT:    setne %al
+; X64-NEXT:    popq %rcx
+; X64-NEXT:    retq
+  %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 512) nounwind
+  %cmp = icmp ne i32 %call, 0
+  ret i1 %cmp
+}
+
+define i1 @length512_eq_const(i8* %X) nounwind {
+; X86-LABEL: length512_eq_const:
+; X86:       # %bb.0:
+; X86-NEXT:    pushl $0
+; X86-NEXT:    pushl $512 # imm = 0x200
+; X86-NEXT:    pushl $.L.str
+; X86-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NEXT:    calll memcmp
+; X86-NEXT:    addl $16, %esp
+; X86-NEXT:    testl %eax, %eax
+; X86-NEXT:    sete %al
+; X86-NEXT:    retl
+;
+; X64-LABEL: length512_eq_const:
+; X64:       # %bb.0:
+; X64-NEXT:    pushq %rax
+; X64-NEXT:    movl $.L.str, %esi
+; X64-NEXT:    movl $512, %edx # imm = 0x200
+; X64-NEXT:    callq memcmp
+; X64-NEXT:    testl %eax, %eax
+; X64-NEXT:    sete %al
+; X64-NEXT:    popq %rcx
+; X64-NEXT:    retq
+  %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 512) nounwind
  %c = icmp eq i32 %m, 0
  ret i1 %c
}
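Taken together, these CodeGen checks pin down the new equality budget: 128-, 255-, and 256-byte equality compares fit within four 64-byte loads and expand inline on AVX512, while 512 bytes exceeds the budget everywhere and stays a libcall (a plain tail call when the i32 result is returned directly). In C terms, the test functions correspond to patterns like the following (hypothetical names; str513 plays the role of @.str):

    #include <string.h>

    extern const char str513[513]; /* stands in for the @.str constant */

    /* Equality-only use: only ==/!= 0 is observed, so this is eligible
       for the load/xor/or expansion checked above. */
    int eq256(const char *x) { return memcmp(x, str513, 256) == 0; }

    /* Past the per-target load budget the call is left alone. */
    int cmp512(const char *x, const char *y) { return memcmp(x, y, 512); }
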
diff --git a/llvm/test/Transforms/ExpandMemCmp/X86/memcmp.ll b/test/Transforms/ExpandMemCmp/X86/memcmp.ll
--- a/llvm/test/Transforms/ExpandMemCmp/X86/memcmp.ll
+++ b/test/Transforms/ExpandMemCmp/X86/memcmp.ll
@@ -780,8 +780,30 @@
define i32 @cmp_eq9(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp_eq9(
-; X32-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 9)
-; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; X32-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]]
+; X32-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
+; X32-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4
+; X32-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32*
+; X32-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 4
+; X32-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32*
+; X32-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP7]]
+; X32-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP9]]
+; X32-NEXT:    [[TMP12:%.*]] = xor i32 [[TMP10]], [[TMP11]]
+; X32-NEXT:    [[TMP13:%.*]] = getelementptr i8, i8* [[X]], i8 8
+; X32-NEXT:    [[TMP14:%.*]] = getelementptr i8, i8* [[Y]], i8 8
+; X32-NEXT:    [[TMP15:%.*]] = load i8, i8* [[TMP13]]
+; X32-NEXT:    [[TMP16:%.*]] = load i8, i8* [[TMP14]]
+; X32-NEXT:    [[TMP17:%.*]] = zext i8 [[TMP15]] to i32
+; X32-NEXT:    [[TMP18:%.*]] = zext i8 [[TMP16]] to i32
+; X32-NEXT:    [[TMP19:%.*]] = xor i32 [[TMP17]], [[TMP18]]
+; X32-NEXT:    [[TMP20:%.*]] = or i32 [[TMP5]], [[TMP12]]
+; X32-NEXT:    [[TMP21:%.*]] = or i32 [[TMP20]], [[TMP19]]
+; X32-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
+; X32-NEXT:    [[TMP23:%.*]] = zext i1 [[TMP22]] to i32
+; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP23]], 0
; X32-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X32-NEXT:    ret i32 [[CONV]]
;
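The cmp_eq9 expansion above splits 9 bytes into two i32 loads plus a trailing i8 that is zero-extended to i32, so the whole or/xor reduction happens at one width. Roughly, in C (a sketch of the emitted IR, not the pass's literal output):

    #include <stdint.h>
    #include <string.h>

    static int eq9(const char *x, const char *y) {
      uint32_t x0, y0, x1, y1;
      memcpy(&x0, x, 4);     /* bytes 0..3 */
      memcpy(&y0, y, 4);
      memcpy(&x1, x + 4, 4); /* bytes 4..7 */
      memcpy(&y1, y + 4, 4);
      /* byte 8 is widened (zext) so it can join the i32 reduction */
      uint32_t t = (uint32_t)(uint8_t)x[8] ^ (uint32_t)(uint8_t)y[8];
      return ((x0 ^ y0) | (x1 ^ y1) | t) == 0;
    }
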
@@ -837,8 +859,32 @@
define i32 @cmp_eq10(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp_eq10(
-; X32-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 10)
-; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; X32-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]]
+; X32-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
+; X32-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4
+; X32-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32*
+; X32-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 4
+; X32-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32*
+; X32-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP7]]
+; X32-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP9]]
+; X32-NEXT:    [[TMP12:%.*]] = xor i32 [[TMP10]], [[TMP11]]
+; X32-NEXT:    [[TMP13:%.*]] = getelementptr i8, i8* [[X]], i8 8
+; X32-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i16*
+; X32-NEXT:    [[TMP15:%.*]] = getelementptr i8, i8* [[Y]], i8 8
+; X32-NEXT:    [[TMP16:%.*]] = bitcast i8* [[TMP15]] to i16*
+; X32-NEXT:    [[TMP17:%.*]] = load i16, i16* [[TMP14]]
+; X32-NEXT:    [[TMP18:%.*]] = load i16, i16* [[TMP16]]
+; X32-NEXT:    [[TMP19:%.*]] = zext i16 [[TMP17]] to i32
+; X32-NEXT:    [[TMP20:%.*]] = zext i16 [[TMP18]] to i32
+; X32-NEXT:    [[TMP21:%.*]] = xor i32 [[TMP19]], [[TMP20]]
+; X32-NEXT:    [[TMP22:%.*]] = or i32 [[TMP5]], [[TMP12]]
+; X32-NEXT:    [[TMP23:%.*]] = or i32 [[TMP22]], [[TMP21]]
+; X32-NEXT:    [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0
+; X32-NEXT:    [[TMP25:%.*]] = zext i1 [[TMP24]] to i32
+; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP25]], 0
; X32-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X32-NEXT:    ret i32 [[CONV]]
;
@@ -898,8 +944,30 @@
define i32 @cmp_eq11(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp_eq11(
-; X32-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 11)
-; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; X32-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]]
+; X32-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
+; X32-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4
+; X32-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32*
+; X32-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 4
+; X32-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32*
+; X32-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP7]]
+; X32-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP9]]
+; X32-NEXT:    [[TMP12:%.*]] = xor i32 [[TMP10]], [[TMP11]]
+; X32-NEXT:    [[TMP13:%.*]] = getelementptr i8, i8* [[X]], i8 7
+; X32-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
+; X32-NEXT:    [[TMP15:%.*]] = getelementptr i8, i8* [[Y]], i8 7
+; X32-NEXT:    [[TMP16:%.*]] = bitcast i8* [[TMP15]] to i32*
+; X32-NEXT:    [[TMP17:%.*]] = load i32, i32* [[TMP14]]
+; X32-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP16]]
+; X32-NEXT:    [[TMP19:%.*]] = xor i32 [[TMP17]], [[TMP18]]
+; X32-NEXT:    [[TMP20:%.*]] = or i32 [[TMP5]], [[TMP12]]
+; X32-NEXT:    [[TMP21:%.*]] = or i32 [[TMP20]], [[TMP19]]
+; X32-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
+; X32-NEXT:    [[TMP23:%.*]] = zext i1 [[TMP22]] to i32
+; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP23]], 0
; X32-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X32-NEXT:    ret i32 [[CONV]]
;
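cmp_eq11 above shows the overlapping-loads trick: 11 bytes are covered by three i32 loads at offsets 0, 4, and 7, with the last load re-reading byte 7. Re-reading a byte is harmless when only equality matters, and it avoids splitting the tail into separate i16 and i8 loads. A C sketch under the same assumption (unaligned 4-byte loads expressed as memcpy):

    #include <stdint.h>
    #include <string.h>

    static int eq11(const char *x, const char *y) {
      static const int off[3] = {0, 4, 7}; /* last load overlaps by one byte */
      uint32_t d = 0;
      for (int i = 0; i < 3; ++i) {
        uint32_t a, b;
        memcpy(&a, x + off[i], 4);
        memcpy(&b, y + off[i], 4);
        d |= a ^ b;
      }
      return d == 0;
    }
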
@@ -957,8 +1025,30 @@
define i32 @cmp_eq12(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp_eq12(
-; X32-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 12)
-; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; X32-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]]
+; X32-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
+; X32-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4
+; X32-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32*
+; X32-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 4
+; X32-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32*
+; X32-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP7]]
+; X32-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP9]]
+; X32-NEXT:    [[TMP12:%.*]] = xor i32 [[TMP10]], [[TMP11]]
+; X32-NEXT:    [[TMP13:%.*]] = getelementptr i8, i8* [[X]], i8 8
+; X32-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
+; X32-NEXT:    [[TMP15:%.*]] = getelementptr i8, i8* [[Y]], i8 8
+; X32-NEXT:    [[TMP16:%.*]] = bitcast i8* [[TMP15]] to i32*
+; X32-NEXT:    [[TMP17:%.*]] = load i32, i32* [[TMP14]]
+; X32-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP16]]
+; X32-NEXT:    [[TMP19:%.*]] = xor i32 [[TMP17]], [[TMP18]]
+; X32-NEXT:    [[TMP20:%.*]] = or i32 [[TMP5]], [[TMP12]]
+; X32-NEXT:    [[TMP21:%.*]] = or i32 [[TMP20]], [[TMP19]]
+; X32-NEXT:    [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
+; X32-NEXT:    [[TMP23:%.*]] = zext i1 [[TMP22]] to i32
+; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP23]], 0
; X32-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X32-NEXT:    ret i32 [[CONV]]
;
@@ -1018,8 +1108,38 @@
define i32 @cmp_eq13(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp_eq13(
-; X32-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 13)
-; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; X32-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]]
+; X32-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
+; X32-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4
+; X32-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32*
+; X32-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 4
+; X32-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32*
+; X32-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP7]]
+; X32-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP9]]
+; X32-NEXT:    [[TMP12:%.*]] = xor i32 [[TMP10]], [[TMP11]]
+; X32-NEXT:    [[TMP13:%.*]] = getelementptr i8, i8* [[X]], i8 8
+; X32-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
+; X32-NEXT:    [[TMP15:%.*]] = getelementptr i8, i8* [[Y]], i8 8
+; X32-NEXT:    [[TMP16:%.*]] = bitcast i8* [[TMP15]] to i32*
+; X32-NEXT:    [[TMP17:%.*]] = load i32, i32* [[TMP14]]
+; X32-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP16]]
+; X32-NEXT:    [[TMP19:%.*]] = xor i32 [[TMP17]], [[TMP18]]
+; X32-NEXT:    [[TMP20:%.*]] = getelementptr i8, i8* [[X]], i8 12
+; X32-NEXT:    [[TMP21:%.*]] = getelementptr i8, i8* [[Y]], i8 12
+; X32-NEXT:    [[TMP22:%.*]] = load i8, i8* [[TMP20]]
+; X32-NEXT:    [[TMP23:%.*]] = load i8, i8* [[TMP21]]
+; X32-NEXT:    [[TMP24:%.*]] = zext i8 [[TMP22]] to i32
+; X32-NEXT:    [[TMP25:%.*]] = zext i8 [[TMP23]] to i32
+; X32-NEXT:    [[TMP26:%.*]] = xor i32 [[TMP24]], [[TMP25]]
+; X32-NEXT:    [[TMP27:%.*]] = or i32 [[TMP5]], [[TMP12]]
+; X32-NEXT:    [[TMP28:%.*]] = or i32 [[TMP19]], [[TMP26]]
+; X32-NEXT:    [[TMP29:%.*]] = or i32 [[TMP27]], [[TMP28]]
+; X32-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
+; X32-NEXT:    [[TMP31:%.*]] = zext i1 [[TMP30]] to i32
+; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP31]], 0
; X32-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X32-NEXT:    ret i32 [[CONV]]
;
@@ -1077,8 +1197,40 @@
define i32 @cmp_eq14(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp_eq14(
-; X32-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 14)
-; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; X32-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]]
+; X32-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
+; X32-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4
+; X32-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32*
+; X32-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 4
+; X32-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32*
+; X32-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP7]]
+; X32-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP9]]
+; X32-NEXT:    [[TMP12:%.*]] = xor i32 [[TMP10]], [[TMP11]]
+; X32-NEXT:    [[TMP13:%.*]] = getelementptr i8, i8* [[X]], i8 8
+; X32-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
+; X32-NEXT:    [[TMP15:%.*]] = getelementptr i8, i8* [[Y]], i8 8
+; X32-NEXT:    [[TMP16:%.*]] = bitcast i8* [[TMP15]] to i32*
+; X32-NEXT:    [[TMP17:%.*]] = load i32, i32* [[TMP14]]
+; X32-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP16]]
+; X32-NEXT:    [[TMP19:%.*]] = xor i32 [[TMP17]], [[TMP18]]
+; X32-NEXT:    [[TMP20:%.*]] = getelementptr i8, i8* [[X]], i8 12
+; X32-NEXT:    [[TMP21:%.*]] = bitcast i8* [[TMP20]] to i16*
+; X32-NEXT:    [[TMP22:%.*]] = getelementptr i8, i8* [[Y]], i8 12
+; X32-NEXT:    [[TMP23:%.*]] = bitcast i8* [[TMP22]] to i16*
+; X32-NEXT:    [[TMP24:%.*]] = load i16, i16* [[TMP21]]
+; X32-NEXT:    [[TMP25:%.*]] = load i16, i16* [[TMP23]]
+; X32-NEXT:    [[TMP26:%.*]] = zext i16 [[TMP24]] to i32
+; X32-NEXT:    [[TMP27:%.*]] = zext i16 [[TMP25]] to i32
+; X32-NEXT:    [[TMP28:%.*]] = xor i32 [[TMP26]], [[TMP27]]
+; X32-NEXT:    [[TMP29:%.*]] = or i32 [[TMP5]], [[TMP12]]
+; X32-NEXT:    [[TMP30:%.*]] = or i32 [[TMP19]], [[TMP28]]
+; X32-NEXT:    [[TMP31:%.*]] = or i32 [[TMP29]], [[TMP30]]
+; X32-NEXT:    [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
+; X32-NEXT:    [[TMP33:%.*]] = zext i1 [[TMP32]] to i32
+; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP33]], 0
; X32-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X32-NEXT:    ret i32 [[CONV]]
;
@@ -1136,8 +1288,38 @@
define i32 @cmp_eq15(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp_eq15(
-; X32-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 15)
-; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; X32-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]]
+; X32-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
+; X32-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4
+; X32-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32*
+; X32-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 4
+; X32-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32*
+; X32-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP7]]
+; X32-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP9]]
+; X32-NEXT:    [[TMP12:%.*]] = xor i32 [[TMP10]], [[TMP11]]
+; X32-NEXT:    [[TMP13:%.*]] = getelementptr i8, i8* [[X]], i8 8
+; X32-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
+; X32-NEXT:    [[TMP15:%.*]] = getelementptr i8, i8* [[Y]], i8 8
+; X32-NEXT:    [[TMP16:%.*]] = bitcast i8* [[TMP15]] to i32*
+; X32-NEXT:    [[TMP17:%.*]] = load i32, i32* [[TMP14]]
+; X32-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP16]]
+; X32-NEXT:    [[TMP19:%.*]] = xor i32 [[TMP17]], [[TMP18]]
+; X32-NEXT:    [[TMP20:%.*]] = getelementptr i8, i8* [[X]], i8 11
+; X32-NEXT:    [[TMP21:%.*]] = bitcast i8* [[TMP20]] to i32*
+; X32-NEXT:    [[TMP22:%.*]] = getelementptr i8, i8* [[Y]], i8 11
+; X32-NEXT:    [[TMP23:%.*]] = bitcast i8* [[TMP22]] to i32*
+; X32-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP21]]
+; X32-NEXT:    [[TMP25:%.*]] = load i32, i32* [[TMP23]]
+; X32-NEXT:    [[TMP26:%.*]] = xor i32 [[TMP24]], [[TMP25]]
+; X32-NEXT:    [[TMP27:%.*]] = or i32 [[TMP5]], [[TMP12]]
+; X32-NEXT:    [[TMP28:%.*]] = or i32 [[TMP19]], [[TMP26]]
+; X32-NEXT:    [[TMP29:%.*]] = or i32 [[TMP27]], [[TMP28]]
+; X32-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
+; X32-NEXT:    [[TMP31:%.*]] = zext i1 [[TMP30]] to i32
+; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP31]], 0
; X32-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X32-NEXT:    ret i32 [[CONV]]
;
@@ -1195,8 +1377,38 @@
define i32 @cmp_eq16(i8* nocapture readonly %x, i8* nocapture readonly %y) {
; X32-LABEL: @cmp_eq16(
-; X32-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 16)
-; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
+; X32-NEXT:    [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i32*
+; X32-NEXT:    [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i32*
+; X32-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]]
+; X32-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP2]]
+; X32-NEXT:    [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]]
+; X32-NEXT:    [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4
+; X32-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32*
+; X32-NEXT:    [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 4
+; X32-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32*
+; X32-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP7]]
+; X32-NEXT:    [[TMP11:%.*]] = load i32, i32* [[TMP9]]
+; X32-NEXT:    [[TMP12:%.*]] = xor i32 [[TMP10]], [[TMP11]]
+; X32-NEXT:    [[TMP13:%.*]] = getelementptr i8, i8* [[X]], i8 8
+; X32-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
+; X32-NEXT:    [[TMP15:%.*]] = getelementptr i8, i8* [[Y]], i8 8
+; X32-NEXT:    [[TMP16:%.*]] = bitcast i8* [[TMP15]] to i32*
+; X32-NEXT:    [[TMP17:%.*]] = load i32, i32* [[TMP14]]
+; X32-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP16]]
+; X32-NEXT:    [[TMP19:%.*]] = xor i32 [[TMP17]], [[TMP18]]
+; X32-NEXT:    [[TMP20:%.*]] = getelementptr i8, i8* [[X]], i8 12
+; X32-NEXT:    [[TMP21:%.*]] = bitcast i8* [[TMP20]] to i32*
+; X32-NEXT:    [[TMP22:%.*]] = getelementptr i8, i8* [[Y]], i8 12
+; X32-NEXT:    [[TMP23:%.*]] = bitcast i8* [[TMP22]] to i32*
+; X32-NEXT:    [[TMP24:%.*]] = load i32, i32* [[TMP21]]
+; X32-NEXT:    [[TMP25:%.*]] = load i32, i32* [[TMP23]]
+; X32-NEXT:    [[TMP26:%.*]] = xor i32 [[TMP24]], [[TMP25]]
+; X32-NEXT:    [[TMP27:%.*]] = or i32 [[TMP5]], [[TMP12]]
+; X32-NEXT:    [[TMP28:%.*]] = or i32 [[TMP19]], [[TMP26]]
+; X32-NEXT:    [[TMP29:%.*]] = or i32 [[TMP27]], [[TMP28]]
+; X32-NEXT:    [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
+; X32-NEXT:    [[TMP31:%.*]] = zext i1 [[TMP30]] to i32
+; X32-NEXT:    [[CMP:%.*]] = icmp eq i32 [[TMP31]], 0
; X32-NEXT:    [[CONV:%.*]] = zext i1 [[CMP]] to i32
; X32-NEXT:    ret i32 [[CONV]]
;