diff --git a/llvm/lib/CodeGen/ExpandMemCmp.cpp b/llvm/lib/CodeGen/ExpandMemCmp.cpp
--- a/llvm/lib/CodeGen/ExpandMemCmp.cpp
+++ b/llvm/lib/CodeGen/ExpandMemCmp.cpp
@@ -264,9 +264,9 @@
                                                 uint64_t OffsetBytes) {
   if (OffsetBytes > 0) {
     auto *ByteType = Type::getInt8Ty(CI->getContext());
-    Source = Builder.CreateGEP(
+    Source = Builder.CreateConstGEP1_64(
         ByteType, Builder.CreateBitCast(Source, ByteType->getPointerTo()),
-        ConstantInt::get(ByteType, OffsetBytes));
+        OffsetBytes);
   }
   return Builder.CreateBitCast(Source, LoadSizeType->getPointerTo());
 }
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -42576,6 +42576,42 @@
   return SDValue();
 }
 
+/// Recursive helper for combineVectorSizedSetCCEquality() to see if we have a
+/// recognizable memcmp expansion.
+static bool isOrXorXorTree(SDValue X, bool Root = true) {
+  if (X.getOpcode() == ISD::OR)
+    return isOrXorXorTree(X.getOperand(0), false) &&
+           isOrXorXorTree(X.getOperand(1), false);
+  if (Root)
+    return false;
+  return X.getOpcode() == ISD::XOR;
+}
+
+/// Recursive helper for combineVectorSizedSetCCEquality() to emit the memcmp
+/// expansion.
+template <typename F>
+static SDValue emitOrXorXorTree(SDValue X, SDLoc &DL, SelectionDAG &DAG,
+                                EVT VecVT, EVT CmpVT, bool HasPT, F SToV) {
+  if (X.getOpcode() == ISD::OR) {
+    SDValue A = emitOrXorXorTree(X.getOperand(0), DL, DAG, VecVT, CmpVT, HasPT, SToV);
+    SDValue B = emitOrXorXorTree(X.getOperand(1), DL, DAG, VecVT, CmpVT, HasPT, SToV);
+    if (VecVT != CmpVT)
+      return DAG.getNode(ISD::OR, DL, CmpVT, A, B);
+    if (HasPT)
+      return DAG.getNode(ISD::OR, DL, VecVT, A, B);
+    return DAG.getNode(ISD::AND, DL, CmpVT, A, B);
+  } else if (X.getOpcode() == ISD::XOR) {
+    SDValue A = SToV(X.getOperand(0));
+    SDValue B = SToV(X.getOperand(1));
+    if (VecVT != CmpVT)
+      return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETNE);
+    if (HasPT)
+      return DAG.getNode(ISD::XOR, DL, VecVT, A, B);
+    return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
+  }
+  llvm_unreachable("Impossible");
+}
+
 /// Try to map a 128-bit or larger integer comparison to vector instructions
 /// before type legalization splits it up into chunks.
 static SDValue combineVectorSizedSetCCEquality(SDNode *SetCC, SelectionDAG &DAG,
@@ -42596,10 +42632,8 @@
   // logically-combined vector-sized operands compared to zero. This pattern may
   // be generated by the memcmp expansion pass with oversized integer compares
   // (see PR33325).
-  bool IsOrXorXorCCZero = isNullConstant(Y) && X.getOpcode() == ISD::OR &&
-                          X.getOperand(0).getOpcode() == ISD::XOR &&
-                          X.getOperand(1).getOpcode() == ISD::XOR;
-  if (isNullConstant(Y) && !IsOrXorXorCCZero)
+  bool IsOrXorXorTreeCCZero = isNullConstant(Y) && isOrXorXorTree(X);
+  if (isNullConstant(Y) && !IsOrXorXorTreeCCZero)
     return SDValue();
 
   // Don't perform this combine if constructing the vector will be expensive.
@@ -42609,7 +42643,7 @@
            X.getOpcode() == ISD::LOAD;
   };
   if ((!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y)) &&
-      !IsOrXorXorCCZero)
+      !IsOrXorXorTreeCCZero)
     return SDValue();
 
   EVT VT = SetCC->getValueType(0);
@@ -42682,28 +42716,12 @@
   };
 
   SDValue Cmp;
-  if (IsOrXorXorCCZero) {
+  if (IsOrXorXorTreeCCZero) {
     // This is a bitwise-combined equality comparison of 2 pairs of vectors:
     // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
     // Use 2 vector equality compares and 'and' the results before doing a
     // MOVMSK.
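     // A sketch of the more general shape handled now that the helpers above
     // recurse (assuming an expansion with more load pairs, e.g. produced
     // under -max-loads-per-memcmp=4):
     //   setcc i128 (or (or (xor A, B), (xor C, D)),
     //                  (or (xor E, F), (xor G, H))), 0, eq|ne
     // isOrXorXorTree() accepts any such or-tree with xor leaves, and
     // emitOrXorXorTree() emits one vector op per xor leaf and folds the
     // results back together, so the rewrite is no longer limited to exactly
     // two pairs.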
-    SDValue A = ScalarToVector(X.getOperand(0).getOperand(0));
-    SDValue B = ScalarToVector(X.getOperand(0).getOperand(1));
-    SDValue C = ScalarToVector(X.getOperand(1).getOperand(0));
-    SDValue D = ScalarToVector(X.getOperand(1).getOperand(1));
-    if (VecVT != CmpVT) {
-      SDValue Cmp1 = DAG.getSetCC(DL, CmpVT, A, B, ISD::SETNE);
-      SDValue Cmp2 = DAG.getSetCC(DL, CmpVT, C, D, ISD::SETNE);
-      Cmp = DAG.getNode(ISD::OR, DL, CmpVT, Cmp1, Cmp2);
-    } else if (HasPT) {
-      SDValue Cmp1 = DAG.getNode(ISD::XOR, DL, VecVT, A, B);
-      SDValue Cmp2 = DAG.getNode(ISD::XOR, DL, VecVT, C, D);
-      Cmp = DAG.getNode(ISD::OR, DL, VecVT, Cmp1, Cmp2);
-    } else {
-      SDValue Cmp1 = DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
-      SDValue Cmp2 = DAG.getSetCC(DL, CmpVT, C, D, ISD::SETEQ);
-      Cmp = DAG.getNode(ISD::AND, DL, CmpVT, Cmp1, Cmp2);
-    }
+    Cmp = emitOrXorXorTree(X, DL, DAG, VecVT, CmpVT, HasPT, ScalarToVector);
   } else {
     SDValue VecX = ScalarToVector(X);
     SDValue VecY = ScalarToVector(Y);
diff --git a/test/CodeGen/X86/memcmp-more-load-pairs.ll b/test/CodeGen/X86/memcmp-more-load-pairs.ll
new file
--- /dev/null
+++ b/test/CodeGen/X86/memcmp-more-load-pairs.ll
@@ -0,0 +1,6229 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: This is a copy of llvm/test/CodeGen/X86/memcmp.ll with more load pairs. Please keep it that way.
+; RUN: llc -max-loads-per-memcmp=4 -memcmp-num-loads-per-block=4 < %s -mtriple=i686-unknown-unknown -mattr=cmov | FileCheck %s --check-prefixes=X86,X86-NOSSE
+; RUN: llc -max-loads-per-memcmp=4 -memcmp-num-loads-per-block=4 < %s -mtriple=i686-unknown-unknown -mattr=+sse | FileCheck %s --check-prefixes=X86,X86-SSE,X86-SSE1
+; RUN: llc -max-loads-per-memcmp=4 -memcmp-num-loads-per-block=4 < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86,X86-SSE,X86-SSE2
+; RUN: llc -max-loads-per-memcmp=4 -memcmp-num-loads-per-block=4 < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=X86,X86-SSE,X86-SSE41
+; RUN: llc -max-loads-per-memcmp=4 -memcmp-num-loads-per-block=4 < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=X64,X64-SSE,X64-SSE2
+; RUN: llc -max-loads-per-memcmp=4 -memcmp-num-loads-per-block=4 < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s --check-prefixes=X64,X64-SSE,X64-SSE41
+; RUN: llc -max-loads-per-memcmp=4 -memcmp-num-loads-per-block=4 < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX1
+; RUN: llc -max-loads-per-memcmp=4 -memcmp-num-loads-per-block=4 < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX2
+; RUN: llc -max-loads-per-memcmp=4 -memcmp-num-loads-per-block=4 < %s -mtriple=x86_64-unknown-unknown -mattr=avx512bw,+prefer-256-bit | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX2
+; RUN: llc -max-loads-per-memcmp=4 -memcmp-num-loads-per-block=4 < %s -mtriple=x86_64-unknown-unknown -mattr=avx512bw,-prefer-256-bit | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX512,X64-AVX512BW
+; RUN: llc -max-loads-per-memcmp=4 -memcmp-num-loads-per-block=4 < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f,+prefer-256-bit,-prefer-mask-registers | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX2
+; RUN: llc -max-loads-per-memcmp=4 -memcmp-num-loads-per-block=4 < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f,-prefer-256-bit,-prefer-mask-registers | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX512,X64-AVX512F
+; RUN: llc -max-loads-per-memcmp=4 -memcmp-num-loads-per-block=4 < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f,+prefer-256-bit,+prefer-mask-registers | FileCheck %s --check-prefixes=X64,X64-MIC-AVX,X64-MIC-AVX2
+; RUN: llc -max-loads-per-memcmp=4 -memcmp-num-loads-per-block=4 < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f,-prefer-256-bit,+prefer-mask-registers | FileCheck %s --check-prefixes=X64,X64-MIC-AVX,X64-MIC-AVX512F
+
+; This tests codegen time inlining/optimization of memcmp
+; rdar://6480398
+
+@.str = private constant [513 x i8] c"01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901\00", align 1
+
+declare i32 @memcmp(i8*, i8*, i64)
+
+define i32 @length0(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length0:
+; X86: # %bb.0:
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: length0:
+; X64: # %bb.0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: retq
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 0) nounwind
+  ret i32 %m
+}
+
+define i1 @length0_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length0_eq:
+; X86: # %bb.0:
+; X86-NEXT: movb $1, %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length0_eq:
+; X64: # %bb.0:
+; X64-NEXT: movb $1, %al
+; X64-NEXT: retq
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 0) nounwind
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
+define i1 @length0_lt(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length0_lt:
+; X86: # %bb.0:
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: length0_lt:
+; X64: # %bb.0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: retq
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 0) nounwind
+  %c = icmp slt i32 %m, 0
+  ret i1 %c
+}
+
+define i32 @length2(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length2:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %ecx
+; X86-NEXT: movzwl (%eax), %edx
+; X86-NEXT: rolw $8, %cx
+; X86-NEXT: rolw $8, %dx
+; X86-NEXT: movzwl %cx, %eax
+; X86-NEXT: movzwl %dx, %ecx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: length2:
+; X64: # %bb.0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: movzwl (%rsi), %ecx
+; X64-NEXT: rolw $8, %ax
+; X64-NEXT: rolw $8, %cx
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: movzwl %cx, %ecx
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: retq
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind
+  ret i32 %m
+}
+
+define i1 @length2_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length2_eq:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %ecx
+; X86-NEXT: cmpw (%eax), %cx
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length2_eq:
+; X64: # %bb.0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: cmpw (%rsi), %ax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+  %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind
+  %c = icmp eq i32 %m, 0
+  ret i1 %c
+}
+
+define i1 @length2_lt(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length2_lt:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %ecx
+; X86-NEXT:
movzwl (%eax), %edx +; X86-NEXT: rolw $8, %cx +; X86-NEXT: rolw $8, %dx +; X86-NEXT: movzwl %cx, %eax +; X86-NEXT: movzwl %dx, %ecx +; X86-NEXT: subl %ecx, %eax +; X86-NEXT: shrl $31, %eax +; X86-NEXT: # kill: def $al killed $al killed $eax +; X86-NEXT: retl +; +; X64-LABEL: length2_lt: +; X64: # %bb.0: +; X64-NEXT: movzwl (%rdi), %eax +; X64-NEXT: movzwl (%rsi), %ecx +; X64-NEXT: rolw $8, %ax +; X64-NEXT: rolw $8, %cx +; X64-NEXT: movzwl %ax, %eax +; X64-NEXT: movzwl %cx, %ecx +; X64-NEXT: subl %ecx, %eax +; X64-NEXT: shrl $31, %eax +; X64-NEXT: # kill: def $al killed $al killed $eax +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind + %c = icmp slt i32 %m, 0 + ret i1 %c +} + +define i1 @length2_gt(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length2_gt: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movzwl (%ecx), %ecx +; X86-NEXT: movzwl (%eax), %eax +; X86-NEXT: rolw $8, %cx +; X86-NEXT: rolw $8, %ax +; X86-NEXT: movzwl %cx, %ecx +; X86-NEXT: movzwl %ax, %eax +; X86-NEXT: subl %eax, %ecx +; X86-NEXT: testl %ecx, %ecx +; X86-NEXT: setg %al +; X86-NEXT: retl +; +; X64-LABEL: length2_gt: +; X64: # %bb.0: +; X64-NEXT: movzwl (%rdi), %eax +; X64-NEXT: movzwl (%rsi), %ecx +; X64-NEXT: rolw $8, %ax +; X64-NEXT: rolw $8, %cx +; X64-NEXT: movzwl %ax, %eax +; X64-NEXT: movzwl %cx, %ecx +; X64-NEXT: subl %ecx, %eax +; X64-NEXT: testl %eax, %eax +; X64-NEXT: setg %al +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind + %c = icmp sgt i32 %m, 0 + ret i1 %c +} + +define i1 @length2_eq_const(i8* %X) nounwind { +; X86-LABEL: length2_eq_const: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movzwl (%eax), %eax +; X86-NEXT: cmpl $12849, %eax # imm = 0x3231 +; X86-NEXT: setne %al +; X86-NEXT: retl +; +; X64-LABEL: length2_eq_const: +; X64: # %bb.0: +; X64-NEXT: movzwl (%rdi), %eax +; X64-NEXT: cmpl $12849, %eax # imm = 0x3231 +; X64-NEXT: setne %al +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 1), i64 2) nounwind + %c = icmp ne i32 %m, 0 + ret i1 %c +} + +define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length2_eq_nobuiltin_attr: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $2 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: sete %al +; X86-NEXT: retl +; +; X64-LABEL: length2_eq_nobuiltin_attr: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $2, %edx +; X64-NEXT: callq memcmp +; X64-NEXT: testl %eax, %eax +; X64-NEXT: sete %al +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind nobuiltin + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i32 @length3(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length3: +; X86: # %bb.0: # %loadbb +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movzwl (%eax), %edx +; X86-NEXT: movzwl (%ecx), %esi +; X86-NEXT: rolw $8, %dx +; X86-NEXT: rolw $8, %si +; X86-NEXT: cmpw %si, %dx +; X86-NEXT: jne .LBB9_1 +; X86-NEXT: # %bb.2: # %loadbb1 +; X86-NEXT: movzbl 2(%eax), %eax +; X86-NEXT: movzbl 2(%ecx), %ecx +; X86-NEXT: subl %ecx, %eax +; X86-NEXT: popl %esi +; X86-NEXT: retl +; X86-NEXT: .LBB9_1: # %res_block +; X86-NEXT: setae %al +; X86-NEXT: movzbl %al, %eax +; X86-NEXT: leal 
-1(%eax,%eax), %eax +; X86-NEXT: popl %esi +; X86-NEXT: retl +; +; X64-LABEL: length3: +; X64: # %bb.0: # %loadbb +; X64-NEXT: movzwl (%rdi), %eax +; X64-NEXT: movzwl (%rsi), %ecx +; X64-NEXT: rolw $8, %ax +; X64-NEXT: rolw $8, %cx +; X64-NEXT: cmpw %cx, %ax +; X64-NEXT: jne .LBB9_1 +; X64-NEXT: # %bb.2: # %loadbb1 +; X64-NEXT: movzbl 2(%rdi), %eax +; X64-NEXT: movzbl 2(%rsi), %ecx +; X64-NEXT: subl %ecx, %eax +; X64-NEXT: retq +; X64-NEXT: .LBB9_1: # %res_block +; X64-NEXT: setae %al +; X64-NEXT: movzbl %al, %eax +; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 3) nounwind + ret i32 %m +} + +define i1 @length3_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length3_eq: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movzwl (%ecx), %edx +; X86-NEXT: xorw (%eax), %dx +; X86-NEXT: movb 2(%ecx), %cl +; X86-NEXT: xorb 2(%eax), %cl +; X86-NEXT: movzbl %cl, %eax +; X86-NEXT: orw %dx, %ax +; X86-NEXT: setne %al +; X86-NEXT: retl +; +; X64-LABEL: length3_eq: +; X64: # %bb.0: +; X64-NEXT: movzwl (%rdi), %eax +; X64-NEXT: xorw (%rsi), %ax +; X64-NEXT: movb 2(%rdi), %cl +; X64-NEXT: xorb 2(%rsi), %cl +; X64-NEXT: movzbl %cl, %ecx +; X64-NEXT: orw %ax, %cx +; X64-NEXT: setne %al +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 3) nounwind + %c = icmp ne i32 %m, 0 + ret i1 %c +} + +define i32 @length4(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length4: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl (%ecx), %ecx +; X86-NEXT: movl (%eax), %edx +; X86-NEXT: bswapl %ecx +; X86-NEXT: bswapl %edx +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpl %edx, %ecx +; X86-NEXT: seta %al +; X86-NEXT: sbbl $0, %eax +; X86-NEXT: retl +; +; X64-LABEL: length4: +; X64: # %bb.0: +; X64-NEXT: movl (%rdi), %ecx +; X64-NEXT: movl (%rsi), %edx +; X64-NEXT: bswapl %ecx +; X64-NEXT: bswapl %edx +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpl %edx, %ecx +; X64-NEXT: seta %al +; X64-NEXT: sbbl $0, %eax +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind + ret i32 %m +} + +define i1 @length4_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length4_eq: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl (%ecx), %ecx +; X86-NEXT: cmpl (%eax), %ecx +; X86-NEXT: setne %al +; X86-NEXT: retl +; +; X64-LABEL: length4_eq: +; X64: # %bb.0: +; X64-NEXT: movl (%rdi), %eax +; X64-NEXT: cmpl (%rsi), %eax +; X64-NEXT: setne %al +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind + %c = icmp ne i32 %m, 0 + ret i1 %c +} + +define i1 @length4_lt(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length4_lt: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl (%ecx), %ecx +; X86-NEXT: movl (%eax), %edx +; X86-NEXT: bswapl %ecx +; X86-NEXT: bswapl %edx +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpl %edx, %ecx +; X86-NEXT: seta %al +; X86-NEXT: sbbl $0, %eax +; X86-NEXT: shrl $31, %eax +; X86-NEXT: # kill: def $al killed $al killed $eax +; X86-NEXT: retl +; +; X64-LABEL: length4_lt: +; X64: # %bb.0: +; X64-NEXT: movl (%rdi), %ecx +; X64-NEXT: movl (%rsi), %edx +; X64-NEXT: bswapl %ecx +; X64-NEXT: bswapl %edx +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpl %edx, %ecx +; X64-NEXT: seta %al +; X64-NEXT: sbbl $0, %eax +; X64-NEXT: shrl $31, %eax +; X64-NEXT: # kill: def $al killed $al killed 
$eax +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind + %c = icmp slt i32 %m, 0 + ret i1 %c +} + +define i1 @length4_gt(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length4_gt: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl (%ecx), %ecx +; X86-NEXT: movl (%eax), %eax +; X86-NEXT: bswapl %ecx +; X86-NEXT: bswapl %eax +; X86-NEXT: xorl %edx, %edx +; X86-NEXT: cmpl %eax, %ecx +; X86-NEXT: seta %dl +; X86-NEXT: sbbl $0, %edx +; X86-NEXT: testl %edx, %edx +; X86-NEXT: setg %al +; X86-NEXT: retl +; +; X64-LABEL: length4_gt: +; X64: # %bb.0: +; X64-NEXT: movl (%rdi), %eax +; X64-NEXT: movl (%rsi), %ecx +; X64-NEXT: bswapl %eax +; X64-NEXT: bswapl %ecx +; X64-NEXT: xorl %edx, %edx +; X64-NEXT: cmpl %ecx, %eax +; X64-NEXT: seta %dl +; X64-NEXT: sbbl $0, %edx +; X64-NEXT: testl %edx, %edx +; X64-NEXT: setg %al +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind + %c = icmp sgt i32 %m, 0 + ret i1 %c +} + +define i1 @length4_eq_const(i8* %X) nounwind { +; X86-LABEL: length4_eq_const: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: cmpl $875770417, (%eax) # imm = 0x34333231 +; X86-NEXT: sete %al +; X86-NEXT: retl +; +; X64-LABEL: length4_eq_const: +; X64: # %bb.0: +; X64-NEXT: cmpl $875770417, (%rdi) # imm = 0x34333231 +; X64-NEXT: sete %al +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 1), i64 4) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i32 @length5(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length5: +; X86: # %bb.0: # %loadbb +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl (%eax), %edx +; X86-NEXT: movl (%ecx), %esi +; X86-NEXT: bswapl %edx +; X86-NEXT: bswapl %esi +; X86-NEXT: cmpl %esi, %edx +; X86-NEXT: jne .LBB16_1 +; X86-NEXT: # %bb.2: # %loadbb1 +; X86-NEXT: movzbl 4(%eax), %eax +; X86-NEXT: movzbl 4(%ecx), %ecx +; X86-NEXT: subl %ecx, %eax +; X86-NEXT: popl %esi +; X86-NEXT: retl +; X86-NEXT: .LBB16_1: # %res_block +; X86-NEXT: setae %al +; X86-NEXT: movzbl %al, %eax +; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: popl %esi +; X86-NEXT: retl +; +; X64-LABEL: length5: +; X64: # %bb.0: # %loadbb +; X64-NEXT: movl (%rdi), %eax +; X64-NEXT: movl (%rsi), %ecx +; X64-NEXT: bswapl %eax +; X64-NEXT: bswapl %ecx +; X64-NEXT: cmpl %ecx, %eax +; X64-NEXT: jne .LBB16_1 +; X64-NEXT: # %bb.2: # %loadbb1 +; X64-NEXT: movzbl 4(%rdi), %eax +; X64-NEXT: movzbl 4(%rsi), %ecx +; X64-NEXT: subl %ecx, %eax +; X64-NEXT: retq +; X64-NEXT: .LBB16_1: # %res_block +; X64-NEXT: setae %al +; X64-NEXT: movzbl %al, %eax +; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind + ret i32 %m +} + +define i1 @length5_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length5_eq: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl (%ecx), %edx +; X86-NEXT: xorl (%eax), %edx +; X86-NEXT: movb 4(%ecx), %cl +; X86-NEXT: xorb 4(%eax), %cl +; X86-NEXT: movzbl %cl, %eax +; X86-NEXT: orl %edx, %eax +; X86-NEXT: setne %al +; X86-NEXT: retl +; +; X64-LABEL: length5_eq: +; X64: # %bb.0: +; X64-NEXT: movl (%rdi), %eax +; X64-NEXT: xorl (%rsi), %eax +; X64-NEXT: movb 4(%rdi), %cl +; X64-NEXT: xorb 4(%rsi), %cl +; X64-NEXT: movzbl %cl, %ecx +; X64-NEXT: orl %eax, %ecx +; X64-NEXT: setne %al +; X64-NEXT: retq + %m 
= tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind + %c = icmp ne i32 %m, 0 + ret i1 %c +} + +define i1 @length5_lt(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length5_lt: +; X86: # %bb.0: # %loadbb +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl (%eax), %edx +; X86-NEXT: movl (%ecx), %esi +; X86-NEXT: bswapl %edx +; X86-NEXT: bswapl %esi +; X86-NEXT: cmpl %esi, %edx +; X86-NEXT: jne .LBB18_1 +; X86-NEXT: # %bb.2: # %loadbb1 +; X86-NEXT: movzbl 4(%eax), %eax +; X86-NEXT: movzbl 4(%ecx), %ecx +; X86-NEXT: subl %ecx, %eax +; X86-NEXT: jmp .LBB18_3 +; X86-NEXT: .LBB18_1: # %res_block +; X86-NEXT: setae %al +; X86-NEXT: movzbl %al, %eax +; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: .LBB18_3: # %endblock +; X86-NEXT: shrl $31, %eax +; X86-NEXT: # kill: def $al killed $al killed $eax +; X86-NEXT: popl %esi +; X86-NEXT: retl +; +; X64-LABEL: length5_lt: +; X64: # %bb.0: # %loadbb +; X64-NEXT: movl (%rdi), %eax +; X64-NEXT: movl (%rsi), %ecx +; X64-NEXT: bswapl %eax +; X64-NEXT: bswapl %ecx +; X64-NEXT: cmpl %ecx, %eax +; X64-NEXT: jne .LBB18_1 +; X64-NEXT: # %bb.2: # %loadbb1 +; X64-NEXT: movzbl 4(%rdi), %eax +; X64-NEXT: movzbl 4(%rsi), %ecx +; X64-NEXT: subl %ecx, %eax +; X64-NEXT: shrl $31, %eax +; X64-NEXT: # kill: def $al killed $al killed $eax +; X64-NEXT: retq +; X64-NEXT: .LBB18_1: # %res_block +; X64-NEXT: setae %al +; X64-NEXT: movzbl %al, %eax +; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: shrl $31, %eax +; X64-NEXT: # kill: def $al killed $al killed $eax +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind + %c = icmp slt i32 %m, 0 + ret i1 %c +} + +define i1 @length7_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length7_eq: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl (%ecx), %edx +; X86-NEXT: movl 3(%ecx), %ecx +; X86-NEXT: xorl (%eax), %edx +; X86-NEXT: xorl 3(%eax), %ecx +; X86-NEXT: orl %edx, %ecx +; X86-NEXT: setne %al +; X86-NEXT: retl +; +; X64-LABEL: length7_eq: +; X64: # %bb.0: +; X64-NEXT: movl (%rdi), %eax +; X64-NEXT: movl 3(%rdi), %ecx +; X64-NEXT: xorl (%rsi), %eax +; X64-NEXT: xorl 3(%rsi), %ecx +; X64-NEXT: orl %eax, %ecx +; X64-NEXT: setne %al +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 7) nounwind + %c = icmp ne i32 %m, 0 + ret i1 %c +} + +define i32 @length8(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length8: +; X86: # %bb.0: +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl (%esi), %ecx +; X86-NEXT: movl (%eax), %edx +; X86-NEXT: bswapl %ecx +; X86-NEXT: bswapl %edx +; X86-NEXT: cmpl %edx, %ecx +; X86-NEXT: jne .LBB20_2 +; X86-NEXT: # %bb.1: # %loadbb1 +; X86-NEXT: movl 4(%esi), %ecx +; X86-NEXT: movl 4(%eax), %edx +; X86-NEXT: bswapl %ecx +; X86-NEXT: bswapl %edx +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpl %edx, %ecx +; X86-NEXT: je .LBB20_3 +; X86-NEXT: .LBB20_2: # %res_block +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpl %edx, %ecx +; X86-NEXT: setae %al +; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: .LBB20_3: # %endblock +; X86-NEXT: popl %esi +; X86-NEXT: retl +; +; X64-LABEL: length8: +; X64: # %bb.0: +; X64-NEXT: movq (%rdi), %rcx +; X64-NEXT: movq (%rsi), %rdx +; X64-NEXT: bswapq %rcx +; X64-NEXT: bswapq %rdx +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpq %rdx, %rcx +; X64-NEXT: seta %al +; X64-NEXT: sbbl $0, %eax +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, 
i64 8) nounwind + ret i32 %m +} + +define i1 @length8_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length8_eq: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl (%ecx), %edx +; X86-NEXT: movl 4(%ecx), %ecx +; X86-NEXT: xorl (%eax), %edx +; X86-NEXT: xorl 4(%eax), %ecx +; X86-NEXT: orl %edx, %ecx +; X86-NEXT: sete %al +; X86-NEXT: retl +; +; X64-LABEL: length8_eq: +; X64: # %bb.0: +; X64-NEXT: movq (%rdi), %rax +; X64-NEXT: cmpq (%rsi), %rax +; X64-NEXT: sete %al +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 8) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i1 @length8_eq_const(i8* %X) nounwind { +; X86-LABEL: length8_eq_const: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl $858927408, %ecx # imm = 0x33323130 +; X86-NEXT: xorl (%eax), %ecx +; X86-NEXT: movl $926299444, %edx # imm = 0x37363534 +; X86-NEXT: xorl 4(%eax), %edx +; X86-NEXT: orl %ecx, %edx +; X86-NEXT: setne %al +; X86-NEXT: retl +; +; X64-LABEL: length8_eq_const: +; X64: # %bb.0: +; X64-NEXT: movabsq $3978425819141910832, %rax # imm = 0x3736353433323130 +; X64-NEXT: cmpq %rax, (%rdi) +; X64-NEXT: setne %al +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 8) nounwind + %c = icmp ne i32 %m, 0 + ret i1 %c +} + +define i1 @length9_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length9_eq: +; X86: # %bb.0: +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl (%ecx), %edx +; X86-NEXT: movl 4(%ecx), %esi +; X86-NEXT: xorl (%eax), %edx +; X86-NEXT: xorl 4(%eax), %esi +; X86-NEXT: orl %edx, %esi +; X86-NEXT: movb 8(%ecx), %cl +; X86-NEXT: xorb 8(%eax), %cl +; X86-NEXT: movzbl %cl, %eax +; X86-NEXT: orl %esi, %eax +; X86-NEXT: sete %al +; X86-NEXT: popl %esi +; X86-NEXT: retl +; +; X64-LABEL: length9_eq: +; X64: # %bb.0: +; X64-NEXT: movq (%rdi), %rax +; X64-NEXT: xorq (%rsi), %rax +; X64-NEXT: movb 8(%rdi), %cl +; X64-NEXT: xorb 8(%rsi), %cl +; X64-NEXT: movzbl %cl, %ecx +; X64-NEXT: orq %rax, %rcx +; X64-NEXT: sete %al +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 9) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i1 @length10_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length10_eq: +; X86: # %bb.0: +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl (%ecx), %edx +; X86-NEXT: movl 4(%ecx), %esi +; X86-NEXT: xorl (%eax), %edx +; X86-NEXT: xorl 4(%eax), %esi +; X86-NEXT: orl %edx, %esi +; X86-NEXT: movzwl 8(%ecx), %ecx +; X86-NEXT: xorw 8(%eax), %cx +; X86-NEXT: movzwl %cx, %eax +; X86-NEXT: orl %esi, %eax +; X86-NEXT: sete %al +; X86-NEXT: popl %esi +; X86-NEXT: retl +; +; X64-LABEL: length10_eq: +; X64: # %bb.0: +; X64-NEXT: movq (%rdi), %rax +; X64-NEXT: xorq (%rsi), %rax +; X64-NEXT: movzwl 8(%rdi), %ecx +; X64-NEXT: xorw 8(%rsi), %cx +; X64-NEXT: movzwl %cx, %ecx +; X64-NEXT: orq %rax, %rcx +; X64-NEXT: sete %al +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 10) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i1 @length11_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length11_eq: +; X86: # %bb.0: +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl (%ecx), %edx +; X86-NEXT: movl 4(%ecx), %esi +; X86-NEXT: xorl (%eax), %edx +; X86-NEXT: xorl 4(%eax), %esi +; 
X86-NEXT: orl %edx, %esi +; X86-NEXT: movl 7(%ecx), %ecx +; X86-NEXT: xorl 7(%eax), %ecx +; X86-NEXT: orl %esi, %ecx +; X86-NEXT: sete %al +; X86-NEXT: popl %esi +; X86-NEXT: retl +; +; X64-LABEL: length11_eq: +; X64: # %bb.0: +; X64-NEXT: movq (%rdi), %rax +; X64-NEXT: movq 3(%rdi), %rcx +; X64-NEXT: xorq (%rsi), %rax +; X64-NEXT: xorq 3(%rsi), %rcx +; X64-NEXT: orq %rax, %rcx +; X64-NEXT: sete %al +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 11) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i1 @length12_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length12_eq: +; X86: # %bb.0: +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl (%ecx), %edx +; X86-NEXT: movl 4(%ecx), %esi +; X86-NEXT: xorl (%eax), %edx +; X86-NEXT: xorl 4(%eax), %esi +; X86-NEXT: orl %edx, %esi +; X86-NEXT: movl 8(%ecx), %ecx +; X86-NEXT: xorl 8(%eax), %ecx +; X86-NEXT: orl %esi, %ecx +; X86-NEXT: setne %al +; X86-NEXT: popl %esi +; X86-NEXT: retl +; +; X64-LABEL: length12_eq: +; X64: # %bb.0: +; X64-NEXT: movq (%rdi), %rax +; X64-NEXT: xorq (%rsi), %rax +; X64-NEXT: movl 8(%rdi), %ecx +; X64-NEXT: xorl 8(%rsi), %ecx +; X64-NEXT: orq %rax, %rcx +; X64-NEXT: setne %al +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 12) nounwind + %c = icmp ne i32 %m, 0 + ret i1 %c +} + +define i32 @length12(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length12: +; X86: # %bb.0: +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl (%esi), %ecx +; X86-NEXT: movl (%eax), %edx +; X86-NEXT: bswapl %ecx +; X86-NEXT: bswapl %edx +; X86-NEXT: cmpl %edx, %ecx +; X86-NEXT: jne .LBB27_3 +; X86-NEXT: # %bb.1: # %loadbb1 +; X86-NEXT: movl 4(%esi), %ecx +; X86-NEXT: movl 4(%eax), %edx +; X86-NEXT: bswapl %ecx +; X86-NEXT: bswapl %edx +; X86-NEXT: cmpl %edx, %ecx +; X86-NEXT: jne .LBB27_3 +; X86-NEXT: # %bb.2: # %loadbb2 +; X86-NEXT: movl 8(%esi), %ecx +; X86-NEXT: movl 8(%eax), %edx +; X86-NEXT: bswapl %ecx +; X86-NEXT: bswapl %edx +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpl %edx, %ecx +; X86-NEXT: je .LBB27_4 +; X86-NEXT: .LBB27_3: # %res_block +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpl %edx, %ecx +; X86-NEXT: setae %al +; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: .LBB27_4: # %endblock +; X86-NEXT: popl %esi +; X86-NEXT: retl +; +; X64-LABEL: length12: +; X64: # %bb.0: +; X64-NEXT: movq (%rdi), %rcx +; X64-NEXT: movq (%rsi), %rdx +; X64-NEXT: bswapq %rcx +; X64-NEXT: bswapq %rdx +; X64-NEXT: cmpq %rdx, %rcx +; X64-NEXT: jne .LBB27_2 +; X64-NEXT: # %bb.1: # %loadbb1 +; X64-NEXT: movl 8(%rdi), %ecx +; X64-NEXT: movl 8(%rsi), %edx +; X64-NEXT: bswapl %ecx +; X64-NEXT: bswapl %edx +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpq %rdx, %rcx +; X64-NEXT: je .LBB27_3 +; X64-NEXT: .LBB27_2: # %res_block +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpq %rdx, %rcx +; X64-NEXT: setae %al +; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: .LBB27_3: # %endblock +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 12) nounwind + ret i32 %m +} + +define i1 @length13_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length13_eq: +; X86: # %bb.0: +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl (%ecx), %edx +; X86-NEXT: movl 4(%ecx), %esi +; X86-NEXT: xorl (%eax), %edx +; X86-NEXT: xorl 4(%eax), %esi +; X86-NEXT: orl %edx, %esi +; X86-NEXT: movl 8(%ecx), %edx +; X86-NEXT: xorl 8(%eax), 
%edx +; X86-NEXT: movb 12(%ecx), %cl +; X86-NEXT: xorb 12(%eax), %cl +; X86-NEXT: movzbl %cl, %eax +; X86-NEXT: orl %edx, %eax +; X86-NEXT: orl %esi, %eax +; X86-NEXT: sete %al +; X86-NEXT: popl %esi +; X86-NEXT: retl +; +; X64-LABEL: length13_eq: +; X64: # %bb.0: +; X64-NEXT: movq (%rdi), %rax +; X64-NEXT: movq 5(%rdi), %rcx +; X64-NEXT: xorq (%rsi), %rax +; X64-NEXT: xorq 5(%rsi), %rcx +; X64-NEXT: orq %rax, %rcx +; X64-NEXT: sete %al +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 13) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i1 @length14_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length14_eq: +; X86: # %bb.0: +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl (%ecx), %edx +; X86-NEXT: movl 4(%ecx), %esi +; X86-NEXT: xorl (%eax), %edx +; X86-NEXT: xorl 4(%eax), %esi +; X86-NEXT: orl %edx, %esi +; X86-NEXT: movl 8(%ecx), %edx +; X86-NEXT: xorl 8(%eax), %edx +; X86-NEXT: movzwl 12(%ecx), %ecx +; X86-NEXT: xorw 12(%eax), %cx +; X86-NEXT: movzwl %cx, %eax +; X86-NEXT: orl %edx, %eax +; X86-NEXT: orl %esi, %eax +; X86-NEXT: sete %al +; X86-NEXT: popl %esi +; X86-NEXT: retl +; +; X64-LABEL: length14_eq: +; X64: # %bb.0: +; X64-NEXT: movq (%rdi), %rax +; X64-NEXT: movq 6(%rdi), %rcx +; X64-NEXT: xorq (%rsi), %rax +; X64-NEXT: xorq 6(%rsi), %rcx +; X64-NEXT: orq %rax, %rcx +; X64-NEXT: sete %al +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 14) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i1 @length15_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length15_eq: +; X86: # %bb.0: +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl (%ecx), %edx +; X86-NEXT: movl 4(%ecx), %esi +; X86-NEXT: xorl (%eax), %edx +; X86-NEXT: xorl 4(%eax), %esi +; X86-NEXT: orl %edx, %esi +; X86-NEXT: movl 8(%ecx), %edx +; X86-NEXT: xorl 8(%eax), %edx +; X86-NEXT: movl 11(%ecx), %ecx +; X86-NEXT: xorl 11(%eax), %ecx +; X86-NEXT: orl %edx, %ecx +; X86-NEXT: orl %esi, %ecx +; X86-NEXT: sete %al +; X86-NEXT: popl %esi +; X86-NEXT: retl +; +; X64-LABEL: length15_eq: +; X64: # %bb.0: +; X64-NEXT: movq (%rdi), %rax +; X64-NEXT: movq 7(%rdi), %rcx +; X64-NEXT: xorq (%rsi), %rax +; X64-NEXT: xorq 7(%rsi), %rcx +; X64-NEXT: orq %rax, %rcx +; X64-NEXT: sete %al +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 15) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +; PR33329 - https://bugs.llvm.org/show_bug.cgi?id=33329 + +define i32 @length16(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length16: +; X86: # %bb.0: +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl (%esi), %ecx +; X86-NEXT: movl (%eax), %edx +; X86-NEXT: bswapl %ecx +; X86-NEXT: bswapl %edx +; X86-NEXT: cmpl %edx, %ecx +; X86-NEXT: jne .LBB31_4 +; X86-NEXT: # %bb.1: # %loadbb1 +; X86-NEXT: movl 4(%esi), %ecx +; X86-NEXT: movl 4(%eax), %edx +; X86-NEXT: bswapl %ecx +; X86-NEXT: bswapl %edx +; X86-NEXT: cmpl %edx, %ecx +; X86-NEXT: jne .LBB31_4 +; X86-NEXT: # %bb.2: # %loadbb2 +; X86-NEXT: movl 8(%esi), %ecx +; X86-NEXT: movl 8(%eax), %edx +; X86-NEXT: bswapl %ecx +; X86-NEXT: bswapl %edx +; X86-NEXT: cmpl %edx, %ecx +; X86-NEXT: jne .LBB31_4 +; X86-NEXT: # %bb.3: # %loadbb3 +; X86-NEXT: movl 12(%esi), %ecx +; X86-NEXT: movl 12(%eax), %edx +; X86-NEXT: bswapl %ecx +; X86-NEXT: bswapl %edx +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpl %edx, %ecx +; X86-NEXT: je .LBB31_5 +; 
X86-NEXT: .LBB31_4: # %res_block +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpl %edx, %ecx +; X86-NEXT: setae %al +; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: .LBB31_5: # %endblock +; X86-NEXT: popl %esi +; X86-NEXT: retl +; +; X64-LABEL: length16: +; X64: # %bb.0: +; X64-NEXT: movq (%rdi), %rcx +; X64-NEXT: movq (%rsi), %rdx +; X64-NEXT: bswapq %rcx +; X64-NEXT: bswapq %rdx +; X64-NEXT: cmpq %rdx, %rcx +; X64-NEXT: jne .LBB31_2 +; X64-NEXT: # %bb.1: # %loadbb1 +; X64-NEXT: movq 8(%rdi), %rcx +; X64-NEXT: movq 8(%rsi), %rdx +; X64-NEXT: bswapq %rcx +; X64-NEXT: bswapq %rdx +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpq %rdx, %rcx +; X64-NEXT: je .LBB31_3 +; X64-NEXT: .LBB31_2: # %res_block +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpq %rdx, %rcx +; X64-NEXT: setae %al +; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: .LBB31_3: # %endblock +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 16) nounwind + ret i32 %m +} + +define i1 @length16_eq(i8* %x, i8* %y) nounwind { +; X86-NOSSE-LABEL: length16_eq: +; X86-NOSSE: # %bb.0: +; X86-NOSSE-NEXT: pushl %esi +; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NOSSE-NEXT: movl (%ecx), %edx +; X86-NOSSE-NEXT: movl 4(%ecx), %esi +; X86-NOSSE-NEXT: xorl (%eax), %edx +; X86-NOSSE-NEXT: xorl 4(%eax), %esi +; X86-NOSSE-NEXT: orl %edx, %esi +; X86-NOSSE-NEXT: movl 8(%ecx), %edx +; X86-NOSSE-NEXT: xorl 8(%eax), %edx +; X86-NOSSE-NEXT: movl 12(%ecx), %ecx +; X86-NOSSE-NEXT: xorl 12(%eax), %ecx +; X86-NOSSE-NEXT: orl %edx, %ecx +; X86-NOSSE-NEXT: orl %esi, %ecx +; X86-NOSSE-NEXT: setne %al +; X86-NOSSE-NEXT: popl %esi +; X86-NOSSE-NEXT: retl +; +; X86-SSE1-LABEL: length16_eq: +; X86-SSE1: # %bb.0: +; X86-SSE1-NEXT: pushl %esi +; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-SSE1-NEXT: movl (%ecx), %edx +; X86-SSE1-NEXT: movl 4(%ecx), %esi +; X86-SSE1-NEXT: xorl (%eax), %edx +; X86-SSE1-NEXT: xorl 4(%eax), %esi +; X86-SSE1-NEXT: orl %edx, %esi +; X86-SSE1-NEXT: movl 8(%ecx), %edx +; X86-SSE1-NEXT: xorl 8(%eax), %edx +; X86-SSE1-NEXT: movl 12(%ecx), %ecx +; X86-SSE1-NEXT: xorl 12(%eax), %ecx +; X86-SSE1-NEXT: orl %edx, %ecx +; X86-SSE1-NEXT: orl %esi, %ecx +; X86-SSE1-NEXT: setne %al +; X86-SSE1-NEXT: popl %esi +; X86-SSE1-NEXT: retl +; +; X86-SSE2-LABEL: length16_eq: +; X86-SSE2: # %bb.0: +; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-SSE2-NEXT: movdqu (%ecx), %xmm0 +; X86-SSE2-NEXT: movdqu (%eax), %xmm1 +; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm1 +; X86-SSE2-NEXT: pmovmskb %xmm1, %eax +; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF +; X86-SSE2-NEXT: setne %al +; X86-SSE2-NEXT: retl +; +; X86-SSE41-LABEL: length16_eq: +; X86-SSE41: # %bb.0: +; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-SSE41-NEXT: movdqu (%ecx), %xmm0 +; X86-SSE41-NEXT: movdqu (%eax), %xmm1 +; X86-SSE41-NEXT: pxor %xmm0, %xmm1 +; X86-SSE41-NEXT: ptest %xmm1, %xmm1 +; X86-SSE41-NEXT: setne %al +; X86-SSE41-NEXT: retl +; +; X64-SSE2-LABEL: length16_eq: +; X64-SSE2: # %bb.0: +; X64-SSE2-NEXT: movdqu (%rdi), %xmm0 +; X64-SSE2-NEXT: movdqu (%rsi), %xmm1 +; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1 +; X64-SSE2-NEXT: pmovmskb %xmm1, %eax +; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF +; X64-SSE2-NEXT: setne %al +; X64-SSE2-NEXT: retq +; +; X64-SSE41-LABEL: length16_eq: +; X64-SSE41: # %bb.0: +; X64-SSE41-NEXT: movdqu (%rdi), %xmm0 +; X64-SSE41-NEXT: movdqu (%rsi), %xmm1 +; 
X64-SSE41-NEXT: pxor %xmm0, %xmm1 +; X64-SSE41-NEXT: ptest %xmm1, %xmm1 +; X64-SSE41-NEXT: setne %al +; X64-SSE41-NEXT: retq +; +; X64-AVX-LABEL: length16_eq: +; X64-AVX: # %bb.0: +; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0 +; X64-AVX-NEXT: vpxor (%rsi), %xmm0, %xmm0 +; X64-AVX-NEXT: vptest %xmm0, %xmm0 +; X64-AVX-NEXT: setne %al +; X64-AVX-NEXT: retq +; +; X64-MIC-AVX-LABEL: length16_eq: +; X64-MIC-AVX: # %bb.0: +; X64-MIC-AVX-NEXT: vmovdqu (%rdi), %xmm0 +; X64-MIC-AVX-NEXT: vmovdqu (%rsi), %xmm1 +; X64-MIC-AVX-NEXT: vpcmpneqd %zmm1, %zmm0, %k0 +; X64-MIC-AVX-NEXT: kortestw %k0, %k0 +; X64-MIC-AVX-NEXT: setne %al +; X64-MIC-AVX-NEXT: vzeroupper +; X64-MIC-AVX-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16) nounwind + %cmp = icmp ne i32 %call, 0 + ret i1 %cmp +} + +define i1 @length16_lt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length16_lt: +; X86: # %bb.0: +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl (%esi), %ecx +; X86-NEXT: movl (%eax), %edx +; X86-NEXT: bswapl %ecx +; X86-NEXT: bswapl %edx +; X86-NEXT: cmpl %edx, %ecx +; X86-NEXT: jne .LBB33_4 +; X86-NEXT: # %bb.1: # %loadbb1 +; X86-NEXT: movl 4(%esi), %ecx +; X86-NEXT: movl 4(%eax), %edx +; X86-NEXT: bswapl %ecx +; X86-NEXT: bswapl %edx +; X86-NEXT: cmpl %edx, %ecx +; X86-NEXT: jne .LBB33_4 +; X86-NEXT: # %bb.2: # %loadbb2 +; X86-NEXT: movl 8(%esi), %ecx +; X86-NEXT: movl 8(%eax), %edx +; X86-NEXT: bswapl %ecx +; X86-NEXT: bswapl %edx +; X86-NEXT: cmpl %edx, %ecx +; X86-NEXT: jne .LBB33_4 +; X86-NEXT: # %bb.3: # %loadbb3 +; X86-NEXT: movl 12(%esi), %ecx +; X86-NEXT: movl 12(%eax), %edx +; X86-NEXT: bswapl %ecx +; X86-NEXT: bswapl %edx +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpl %edx, %ecx +; X86-NEXT: je .LBB33_5 +; X86-NEXT: .LBB33_4: # %res_block +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: cmpl %edx, %ecx +; X86-NEXT: setae %al +; X86-NEXT: leal -1(%eax,%eax), %eax +; X86-NEXT: .LBB33_5: # %endblock +; X86-NEXT: shrl $31, %eax +; X86-NEXT: # kill: def $al killed $al killed $eax +; X86-NEXT: popl %esi +; X86-NEXT: retl +; +; X64-LABEL: length16_lt: +; X64: # %bb.0: +; X64-NEXT: movq (%rdi), %rcx +; X64-NEXT: movq (%rsi), %rdx +; X64-NEXT: bswapq %rcx +; X64-NEXT: bswapq %rdx +; X64-NEXT: cmpq %rdx, %rcx +; X64-NEXT: jne .LBB33_2 +; X64-NEXT: # %bb.1: # %loadbb1 +; X64-NEXT: movq 8(%rdi), %rcx +; X64-NEXT: movq 8(%rsi), %rdx +; X64-NEXT: bswapq %rcx +; X64-NEXT: bswapq %rdx +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpq %rdx, %rcx +; X64-NEXT: je .LBB33_3 +; X64-NEXT: .LBB33_2: # %res_block +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpq %rdx, %rcx +; X64-NEXT: setae %al +; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: .LBB33_3: # %endblock +; X64-NEXT: shrl $31, %eax +; X64-NEXT: # kill: def $al killed $al killed $eax +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16) nounwind + %cmp = icmp slt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length16_gt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length16_gt: +; X86: # %bb.0: +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %edx +; X86-NEXT: movl {{[0-9]+}}(%esp), %esi +; X86-NEXT: movl (%esi), %eax +; X86-NEXT: movl (%edx), %ecx +; X86-NEXT: bswapl %eax +; X86-NEXT: bswapl %ecx +; X86-NEXT: cmpl %ecx, %eax +; X86-NEXT: jne .LBB34_4 +; X86-NEXT: # %bb.1: # %loadbb1 +; X86-NEXT: movl 4(%esi), %eax +; X86-NEXT: movl 4(%edx), %ecx +; X86-NEXT: bswapl %eax +; X86-NEXT: bswapl %ecx +; X86-NEXT: cmpl %ecx, %eax +; X86-NEXT: jne .LBB34_4 +; X86-NEXT: # %bb.2: # %loadbb2 
+; X86-NEXT: movl 8(%esi), %eax +; X86-NEXT: movl 8(%edx), %ecx +; X86-NEXT: bswapl %eax +; X86-NEXT: bswapl %ecx +; X86-NEXT: cmpl %ecx, %eax +; X86-NEXT: jne .LBB34_4 +; X86-NEXT: # %bb.3: # %loadbb3 +; X86-NEXT: movl 12(%esi), %eax +; X86-NEXT: movl 12(%edx), %ecx +; X86-NEXT: bswapl %eax +; X86-NEXT: bswapl %ecx +; X86-NEXT: xorl %edx, %edx +; X86-NEXT: cmpl %ecx, %eax +; X86-NEXT: je .LBB34_5 +; X86-NEXT: .LBB34_4: # %res_block +; X86-NEXT: xorl %edx, %edx +; X86-NEXT: cmpl %ecx, %eax +; X86-NEXT: setae %dl +; X86-NEXT: leal -1(%edx,%edx), %edx +; X86-NEXT: .LBB34_5: # %endblock +; X86-NEXT: testl %edx, %edx +; X86-NEXT: setg %al +; X86-NEXT: popl %esi +; X86-NEXT: retl +; +; X64-LABEL: length16_gt: +; X64: # %bb.0: +; X64-NEXT: movq (%rdi), %rax +; X64-NEXT: movq (%rsi), %rcx +; X64-NEXT: bswapq %rax +; X64-NEXT: bswapq %rcx +; X64-NEXT: cmpq %rcx, %rax +; X64-NEXT: jne .LBB34_2 +; X64-NEXT: # %bb.1: # %loadbb1 +; X64-NEXT: movq 8(%rdi), %rax +; X64-NEXT: movq 8(%rsi), %rcx +; X64-NEXT: bswapq %rax +; X64-NEXT: bswapq %rcx +; X64-NEXT: xorl %edx, %edx +; X64-NEXT: cmpq %rcx, %rax +; X64-NEXT: je .LBB34_3 +; X64-NEXT: .LBB34_2: # %res_block +; X64-NEXT: xorl %edx, %edx +; X64-NEXT: cmpq %rcx, %rax +; X64-NEXT: setae %dl +; X64-NEXT: leal -1(%rdx,%rdx), %edx +; X64-NEXT: .LBB34_3: # %endblock +; X64-NEXT: testl %edx, %edx +; X64-NEXT: setg %al +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16) nounwind + %cmp = icmp sgt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length16_eq_const(i8* %X) nounwind { +; X86-NOSSE-LABEL: length16_eq_const: +; X86-NOSSE: # %bb.0: +; X86-NOSSE-NEXT: pushl %esi +; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NOSSE-NEXT: movl $858927408, %ecx # imm = 0x33323130 +; X86-NOSSE-NEXT: xorl (%eax), %ecx +; X86-NOSSE-NEXT: movl $926299444, %edx # imm = 0x37363534 +; X86-NOSSE-NEXT: xorl 4(%eax), %edx +; X86-NOSSE-NEXT: orl %ecx, %edx +; X86-NOSSE-NEXT: movl $825243960, %ecx # imm = 0x31303938 +; X86-NOSSE-NEXT: xorl 8(%eax), %ecx +; X86-NOSSE-NEXT: movl $892613426, %esi # imm = 0x35343332 +; X86-NOSSE-NEXT: xorl 12(%eax), %esi +; X86-NOSSE-NEXT: orl %ecx, %esi +; X86-NOSSE-NEXT: orl %edx, %esi +; X86-NOSSE-NEXT: sete %al +; X86-NOSSE-NEXT: popl %esi +; X86-NOSSE-NEXT: retl +; +; X86-SSE1-LABEL: length16_eq_const: +; X86-SSE1: # %bb.0: +; X86-SSE1-NEXT: pushl %esi +; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-SSE1-NEXT: movl $858927408, %ecx # imm = 0x33323130 +; X86-SSE1-NEXT: xorl (%eax), %ecx +; X86-SSE1-NEXT: movl $926299444, %edx # imm = 0x37363534 +; X86-SSE1-NEXT: xorl 4(%eax), %edx +; X86-SSE1-NEXT: orl %ecx, %edx +; X86-SSE1-NEXT: movl $825243960, %ecx # imm = 0x31303938 +; X86-SSE1-NEXT: xorl 8(%eax), %ecx +; X86-SSE1-NEXT: movl $892613426, %esi # imm = 0x35343332 +; X86-SSE1-NEXT: xorl 12(%eax), %esi +; X86-SSE1-NEXT: orl %ecx, %esi +; X86-SSE1-NEXT: orl %edx, %esi +; X86-SSE1-NEXT: sete %al +; X86-SSE1-NEXT: popl %esi +; X86-SSE1-NEXT: retl +; +; X86-SSE2-LABEL: length16_eq_const: +; X86-SSE2: # %bb.0: +; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-SSE2-NEXT: movdqu (%eax), %xmm0 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pmovmskb %xmm0, %eax +; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF +; X86-SSE2-NEXT: sete %al +; X86-SSE2-NEXT: retl +; +; X86-SSE41-LABEL: length16_eq_const: +; X86-SSE41: # %bb.0: +; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-SSE41-NEXT: movdqu (%eax), %xmm0 +; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 +; X86-SSE41-NEXT: ptest %xmm0, %xmm0 +; X86-SSE41-NEXT: 
sete %al +; X86-SSE41-NEXT: retl +; +; X64-SSE2-LABEL: length16_eq_const: +; X64-SSE2: # %bb.0: +; X64-SSE2-NEXT: movdqu (%rdi), %xmm0 +; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0 +; X64-SSE2-NEXT: pmovmskb %xmm0, %eax +; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF +; X64-SSE2-NEXT: sete %al +; X64-SSE2-NEXT: retq +; +; X64-SSE41-LABEL: length16_eq_const: +; X64-SSE41: # %bb.0: +; X64-SSE41-NEXT: movdqu (%rdi), %xmm0 +; X64-SSE41-NEXT: pxor {{.*}}(%rip), %xmm0 +; X64-SSE41-NEXT: ptest %xmm0, %xmm0 +; X64-SSE41-NEXT: sete %al +; X64-SSE41-NEXT: retq +; +; X64-AVX-LABEL: length16_eq_const: +; X64-AVX: # %bb.0: +; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0 +; X64-AVX-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0 +; X64-AVX-NEXT: vptest %xmm0, %xmm0 +; X64-AVX-NEXT: sete %al +; X64-AVX-NEXT: retq +; +; X64-MIC-AVX-LABEL: length16_eq_const: +; X64-MIC-AVX: # %bb.0: +; X64-MIC-AVX-NEXT: vmovdqu (%rdi), %xmm0 +; X64-MIC-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [858927408,926299444,825243960,892613426] +; X64-MIC-AVX-NEXT: vpcmpneqd %zmm1, %zmm0, %k0 +; X64-MIC-AVX-NEXT: kortestw %k0, %k0 +; X64-MIC-AVX-NEXT: sete %al +; X64-MIC-AVX-NEXT: vzeroupper +; X64-MIC-AVX-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 16) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +; PR33914 - https://bugs.llvm.org/show_bug.cgi?id=33914 + +define i32 @length24(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length24: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $24 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: retl +; +; X64-LABEL: length24: +; X64: # %bb.0: +; X64-NEXT: movq (%rdi), %rcx +; X64-NEXT: movq (%rsi), %rdx +; X64-NEXT: bswapq %rcx +; X64-NEXT: bswapq %rdx +; X64-NEXT: cmpq %rdx, %rcx +; X64-NEXT: jne .LBB36_3 +; X64-NEXT: # %bb.1: # %loadbb1 +; X64-NEXT: movq 8(%rdi), %rcx +; X64-NEXT: movq 8(%rsi), %rdx +; X64-NEXT: bswapq %rcx +; X64-NEXT: bswapq %rdx +; X64-NEXT: cmpq %rdx, %rcx +; X64-NEXT: jne .LBB36_3 +; X64-NEXT: # %bb.2: # %loadbb2 +; X64-NEXT: movq 16(%rdi), %rcx +; X64-NEXT: movq 16(%rsi), %rdx +; X64-NEXT: bswapq %rcx +; X64-NEXT: bswapq %rdx +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpq %rdx, %rcx +; X64-NEXT: je .LBB36_4 +; X64-NEXT: .LBB36_3: # %res_block +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpq %rdx, %rcx +; X64-NEXT: setae %al +; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: .LBB36_4: # %endblock +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 24) nounwind + ret i32 %m +} + +define i1 @length24_eq(i8* %x, i8* %y) nounwind { +; X86-NOSSE-LABEL: length24_eq: +; X86-NOSSE: # %bb.0: +; X86-NOSSE-NEXT: pushl $0 +; X86-NOSSE-NEXT: pushl $24 +; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NOSSE-NEXT: calll memcmp +; X86-NOSSE-NEXT: addl $16, %esp +; X86-NOSSE-NEXT: testl %eax, %eax +; X86-NOSSE-NEXT: sete %al +; X86-NOSSE-NEXT: retl +; +; X86-SSE1-LABEL: length24_eq: +; X86-SSE1: # %bb.0: +; X86-SSE1-NEXT: pushl $0 +; X86-SSE1-NEXT: pushl $24 +; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp) +; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp) +; X86-SSE1-NEXT: calll memcmp +; X86-SSE1-NEXT: addl $16, %esp +; X86-SSE1-NEXT: testl %eax, %eax +; X86-SSE1-NEXT: sete %al +; X86-SSE1-NEXT: retl +; +; X86-SSE2-LABEL: length24_eq: +; X86-SSE2: # %bb.0: +; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-SSE2-NEXT: movdqu (%ecx), %xmm0 +; 
X86-SSE2-NEXT: movdqu 8(%ecx), %xmm1 +; X86-SSE2-NEXT: movdqu (%eax), %xmm2 +; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm2 +; X86-SSE2-NEXT: movdqu 8(%eax), %xmm0 +; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm0 +; X86-SSE2-NEXT: pand %xmm2, %xmm0 +; X86-SSE2-NEXT: pmovmskb %xmm0, %eax +; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF +; X86-SSE2-NEXT: sete %al +; X86-SSE2-NEXT: retl +; +; X86-SSE41-LABEL: length24_eq: +; X86-SSE41: # %bb.0: +; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-SSE41-NEXT: movdqu (%ecx), %xmm0 +; X86-SSE41-NEXT: movdqu 8(%ecx), %xmm1 +; X86-SSE41-NEXT: movdqu (%eax), %xmm2 +; X86-SSE41-NEXT: pxor %xmm0, %xmm2 +; X86-SSE41-NEXT: movdqu 8(%eax), %xmm0 +; X86-SSE41-NEXT: pxor %xmm1, %xmm0 +; X86-SSE41-NEXT: por %xmm2, %xmm0 +; X86-SSE41-NEXT: ptest %xmm0, %xmm0 +; X86-SSE41-NEXT: sete %al +; X86-SSE41-NEXT: retl +; +; X64-SSE2-LABEL: length24_eq: +; X64-SSE2: # %bb.0: +; X64-SSE2-NEXT: movdqu (%rdi), %xmm0 +; X64-SSE2-NEXT: movdqu (%rsi), %xmm1 +; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1 +; X64-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; X64-SSE2-NEXT: movq {{.*#+}} xmm2 = mem[0],zero +; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm2 +; X64-SSE2-NEXT: pand %xmm1, %xmm2 +; X64-SSE2-NEXT: pmovmskb %xmm2, %eax +; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF +; X64-SSE2-NEXT: sete %al +; X64-SSE2-NEXT: retq +; +; X64-SSE41-LABEL: length24_eq: +; X64-SSE41: # %bb.0: +; X64-SSE41-NEXT: movdqu (%rdi), %xmm0 +; X64-SSE41-NEXT: movdqu (%rsi), %xmm1 +; X64-SSE41-NEXT: pxor %xmm0, %xmm1 +; X64-SSE41-NEXT: movq {{.*#+}} xmm0 = mem[0],zero +; X64-SSE41-NEXT: movq {{.*#+}} xmm2 = mem[0],zero +; X64-SSE41-NEXT: pxor %xmm0, %xmm2 +; X64-SSE41-NEXT: por %xmm1, %xmm2 +; X64-SSE41-NEXT: ptest %xmm2, %xmm2 +; X64-SSE41-NEXT: sete %al +; X64-SSE41-NEXT: retq +; +; X64-AVX-LABEL: length24_eq: +; X64-AVX: # %bb.0: +; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0 +; X64-AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; X64-AVX-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero +; X64-AVX-NEXT: vpxor %xmm2, %xmm1, %xmm1 +; X64-AVX-NEXT: vpxor (%rsi), %xmm0, %xmm0 +; X64-AVX-NEXT: vpor %xmm1, %xmm0, %xmm0 +; X64-AVX-NEXT: vptest %xmm0, %xmm0 +; X64-AVX-NEXT: sete %al +; X64-AVX-NEXT: retq +; +; X64-MIC-AVX-LABEL: length24_eq: +; X64-MIC-AVX: # %bb.0: +; X64-MIC-AVX-NEXT: vmovdqu (%rdi), %xmm0 +; X64-MIC-AVX-NEXT: vmovdqu (%rsi), %xmm1 +; X64-MIC-AVX-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero +; X64-MIC-AVX-NEXT: vmovq {{.*#+}} xmm3 = mem[0],zero +; X64-MIC-AVX-NEXT: vpcmpneqd %zmm3, %zmm2, %k0 +; X64-MIC-AVX-NEXT: vpcmpneqd %zmm1, %zmm0, %k1 +; X64-MIC-AVX-NEXT: kortestw %k0, %k1 +; X64-MIC-AVX-NEXT: sete %al +; X64-MIC-AVX-NEXT: vzeroupper +; X64-MIC-AVX-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 24) nounwind + %cmp = icmp eq i32 %call, 0 + ret i1 %cmp +} + +define i1 @length24_lt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length24_lt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $24 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: shrl $31, %eax +; X86-NEXT: # kill: def $al killed $al killed $eax +; X86-NEXT: retl +; +; X64-LABEL: length24_lt: +; X64: # %bb.0: +; X64-NEXT: movq (%rdi), %rcx +; X64-NEXT: movq (%rsi), %rdx +; X64-NEXT: bswapq %rcx +; X64-NEXT: bswapq %rdx +; X64-NEXT: cmpq %rdx, %rcx +; X64-NEXT: jne .LBB38_3 +; X64-NEXT: # %bb.1: # %loadbb1 +; X64-NEXT: movq 8(%rdi), %rcx +; X64-NEXT: movq 8(%rsi), %rdx +; X64-NEXT: bswapq %rcx +; X64-NEXT: bswapq %rdx +; 
X64-NEXT: cmpq %rdx, %rcx +; X64-NEXT: jne .LBB38_3 +; X64-NEXT: # %bb.2: # %loadbb2 +; X64-NEXT: movq 16(%rdi), %rcx +; X64-NEXT: movq 16(%rsi), %rdx +; X64-NEXT: bswapq %rcx +; X64-NEXT: bswapq %rdx +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpq %rdx, %rcx +; X64-NEXT: je .LBB38_4 +; X64-NEXT: .LBB38_3: # %res_block +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: cmpq %rdx, %rcx +; X64-NEXT: setae %al +; X64-NEXT: leal -1(%rax,%rax), %eax +; X64-NEXT: .LBB38_4: # %endblock +; X64-NEXT: shrl $31, %eax +; X64-NEXT: # kill: def $al killed $al killed $eax +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 24) nounwind + %cmp = icmp slt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length24_gt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length24_gt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $24 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setg %al +; X86-NEXT: retl +; +; X64-LABEL: length24_gt: +; X64: # %bb.0: +; X64-NEXT: movq (%rdi), %rax +; X64-NEXT: movq (%rsi), %rcx +; X64-NEXT: bswapq %rax +; X64-NEXT: bswapq %rcx +; X64-NEXT: cmpq %rcx, %rax +; X64-NEXT: jne .LBB39_3 +; X64-NEXT: # %bb.1: # %loadbb1 +; X64-NEXT: movq 8(%rdi), %rax +; X64-NEXT: movq 8(%rsi), %rcx +; X64-NEXT: bswapq %rax +; X64-NEXT: bswapq %rcx +; X64-NEXT: cmpq %rcx, %rax +; X64-NEXT: jne .LBB39_3 +; X64-NEXT: # %bb.2: # %loadbb2 +; X64-NEXT: movq 16(%rdi), %rax +; X64-NEXT: movq 16(%rsi), %rcx +; X64-NEXT: bswapq %rax +; X64-NEXT: bswapq %rcx +; X64-NEXT: xorl %edx, %edx +; X64-NEXT: cmpq %rcx, %rax +; X64-NEXT: je .LBB39_4 +; X64-NEXT: .LBB39_3: # %res_block +; X64-NEXT: xorl %edx, %edx +; X64-NEXT: cmpq %rcx, %rax +; X64-NEXT: setae %dl +; X64-NEXT: leal -1(%rdx,%rdx), %edx +; X64-NEXT: .LBB39_4: # %endblock +; X64-NEXT: testl %edx, %edx +; X64-NEXT: setg %al +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 24) nounwind + %cmp = icmp sgt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length24_eq_const(i8* %X) nounwind { +; X86-NOSSE-LABEL: length24_eq_const: +; X86-NOSSE: # %bb.0: +; X86-NOSSE-NEXT: pushl $0 +; X86-NOSSE-NEXT: pushl $24 +; X86-NOSSE-NEXT: pushl $.L.str +; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NOSSE-NEXT: calll memcmp +; X86-NOSSE-NEXT: addl $16, %esp +; X86-NOSSE-NEXT: testl %eax, %eax +; X86-NOSSE-NEXT: setne %al +; X86-NOSSE-NEXT: retl +; +; X86-SSE1-LABEL: length24_eq_const: +; X86-SSE1: # %bb.0: +; X86-SSE1-NEXT: pushl $0 +; X86-SSE1-NEXT: pushl $24 +; X86-SSE1-NEXT: pushl $.L.str +; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp) +; X86-SSE1-NEXT: calll memcmp +; X86-SSE1-NEXT: addl $16, %esp +; X86-SSE1-NEXT: testl %eax, %eax +; X86-SSE1-NEXT: setne %al +; X86-SSE1-NEXT: retl +; +; X86-SSE2-LABEL: length24_eq_const: +; X86-SSE2: # %bb.0: +; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-SSE2-NEXT: movdqu (%eax), %xmm0 +; X86-SSE2-NEXT: movdqu 8(%eax), %xmm1 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand %xmm1, %xmm0 +; X86-SSE2-NEXT: pmovmskb %xmm0, %eax +; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF +; X86-SSE2-NEXT: setne %al +; X86-SSE2-NEXT: retl +; +; X86-SSE41-LABEL: length24_eq_const: +; X86-SSE41: # %bb.0: +; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-SSE41-NEXT: movdqu (%eax), %xmm0 +; X86-SSE41-NEXT: movdqu 8(%eax), %xmm1 +; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 +; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 +; X86-SSE41-NEXT: por %xmm1, 
%xmm0 +; X86-SSE41-NEXT: ptest %xmm0, %xmm0 +; X86-SSE41-NEXT: setne %al +; X86-SSE41-NEXT: retl +; +; X64-SSE2-LABEL: length24_eq_const: +; X64-SSE2: # %bb.0: +; X64-SSE2-NEXT: movdqu (%rdi), %xmm0 +; X64-SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero +; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm1 +; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0 +; X64-SSE2-NEXT: pand %xmm1, %xmm0 +; X64-SSE2-NEXT: pmovmskb %xmm0, %eax +; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF +; X64-SSE2-NEXT: setne %al +; X64-SSE2-NEXT: retq +; +; X64-SSE41-LABEL: length24_eq_const: +; X64-SSE41: # %bb.0: +; X64-SSE41-NEXT: movdqu (%rdi), %xmm0 +; X64-SSE41-NEXT: movq {{.*#+}} xmm1 = mem[0],zero +; X64-SSE41-NEXT: pxor {{.*}}(%rip), %xmm1 +; X64-SSE41-NEXT: pxor {{.*}}(%rip), %xmm0 +; X64-SSE41-NEXT: por %xmm1, %xmm0 +; X64-SSE41-NEXT: ptest %xmm0, %xmm0 +; X64-SSE41-NEXT: setne %al +; X64-SSE41-NEXT: retq +; +; X64-AVX-LABEL: length24_eq_const: +; X64-AVX: # %bb.0: +; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0 +; X64-AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; X64-AVX-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm1 +; X64-AVX-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0 +; X64-AVX-NEXT: vpor %xmm1, %xmm0, %xmm0 +; X64-AVX-NEXT: vptest %xmm0, %xmm0 +; X64-AVX-NEXT: setne %al +; X64-AVX-NEXT: retq +; +; X64-MIC-AVX-LABEL: length24_eq_const: +; X64-MIC-AVX: # %bb.0: +; X64-MIC-AVX-NEXT: vmovdqu (%rdi), %xmm0 +; X64-MIC-AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; X64-MIC-AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [959985462,858927408,0,0] +; X64-MIC-AVX-NEXT: vpcmpneqd %zmm2, %zmm1, %k0 +; X64-MIC-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [858927408,926299444,825243960,892613426] +; X64-MIC-AVX-NEXT: vpcmpneqd %zmm1, %zmm0, %k1 +; X64-MIC-AVX-NEXT: kortestw %k0, %k1 +; X64-MIC-AVX-NEXT: setne %al +; X64-MIC-AVX-NEXT: vzeroupper +; X64-MIC-AVX-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 24) nounwind + %c = icmp ne i32 %m, 0 + ret i1 %c +} + +define i32 @length31(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length31: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $31 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: retl +; +; X64-LABEL: length31: +; X64: # %bb.0: +; X64-NEXT: movl $31, %edx +; X64-NEXT: jmp memcmp # TAILCALL + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 31) nounwind + ret i32 %m +} + +define i1 @length31_eq(i8* %x, i8* %y) nounwind { +; X86-NOSSE-LABEL: length31_eq: +; X86-NOSSE: # %bb.0: +; X86-NOSSE-NEXT: pushl $0 +; X86-NOSSE-NEXT: pushl $31 +; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NOSSE-NEXT: calll memcmp +; X86-NOSSE-NEXT: addl $16, %esp +; X86-NOSSE-NEXT: testl %eax, %eax +; X86-NOSSE-NEXT: sete %al +; X86-NOSSE-NEXT: retl +; +; X86-SSE1-LABEL: length31_eq: +; X86-SSE1: # %bb.0: +; X86-SSE1-NEXT: pushl $0 +; X86-SSE1-NEXT: pushl $31 +; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp) +; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp) +; X86-SSE1-NEXT: calll memcmp +; X86-SSE1-NEXT: addl $16, %esp +; X86-SSE1-NEXT: testl %eax, %eax +; X86-SSE1-NEXT: sete %al +; X86-SSE1-NEXT: retl +; +; X86-SSE2-LABEL: length31_eq: +; X86-SSE2: # %bb.0: +; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-SSE2-NEXT: movdqu (%ecx), %xmm0 +; X86-SSE2-NEXT: movdqu 15(%ecx), %xmm1 +; X86-SSE2-NEXT: movdqu (%eax), %xmm2 +; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm2 +; X86-SSE2-NEXT: movdqu 15(%eax), %xmm0 +; 
+; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X86-SSE2-NEXT: pand %xmm2, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: sete %al
+; X86-SSE2-NEXT: retl
+;
+; X86-SSE41-LABEL: length31_eq:
+; X86-SSE41: # %bb.0:
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE41-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE41-NEXT: movdqu 15(%ecx), %xmm1
+; X86-SSE41-NEXT: movdqu (%eax), %xmm2
+; X86-SSE41-NEXT: pxor %xmm0, %xmm2
+; X86-SSE41-NEXT: movdqu 15(%eax), %xmm0
+; X86-SSE41-NEXT: pxor %xmm1, %xmm0
+; X86-SSE41-NEXT: por %xmm2, %xmm0
+; X86-SSE41-NEXT: ptest %xmm0, %xmm0
+; X86-SSE41-NEXT: sete %al
+; X86-SSE41-NEXT: retl
+;
+; X64-SSE2-LABEL: length31_eq:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu 15(%rdi), %xmm1
+; X64-SSE2-NEXT: movdqu (%rsi), %xmm2
+; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X64-SSE2-NEXT: movdqu 15(%rsi), %xmm0
+; X64-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X64-SSE2-NEXT: pand %xmm2, %xmm0
+; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: retq
+;
+; X64-SSE41-LABEL: length31_eq:
+; X64-SSE41: # %bb.0:
+; X64-SSE41-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE41-NEXT: movdqu 15(%rdi), %xmm1
+; X64-SSE41-NEXT: movdqu (%rsi), %xmm2
+; X64-SSE41-NEXT: pxor %xmm0, %xmm2
+; X64-SSE41-NEXT: movdqu 15(%rsi), %xmm0
+; X64-SSE41-NEXT: pxor %xmm1, %xmm0
+; X64-SSE41-NEXT: por %xmm2, %xmm0
+; X64-SSE41-NEXT: ptest %xmm0, %xmm0
+; X64-SSE41-NEXT: sete %al
+; X64-SSE41-NEXT: retq
+;
+; X64-AVX-LABEL: length31_eq:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX-NEXT: vmovdqu 15(%rdi), %xmm1
+; X64-AVX-NEXT: vpxor 15(%rsi), %xmm1, %xmm1
+; X64-AVX-NEXT: vpxor (%rsi), %xmm0, %xmm0
+; X64-AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: vptest %xmm0, %xmm0
+; X64-AVX-NEXT: sete %al
+; X64-AVX-NEXT: retq
+;
+; X64-MIC-AVX-LABEL: length31_eq:
+; X64-MIC-AVX: # %bb.0:
+; X64-MIC-AVX-NEXT: vmovdqu (%rdi), %xmm0
+; X64-MIC-AVX-NEXT: vmovdqu 15(%rdi), %xmm1
+; X64-MIC-AVX-NEXT: vmovdqu (%rsi), %xmm2
+; X64-MIC-AVX-NEXT: vmovdqu 15(%rsi), %xmm3
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm3, %zmm1, %k0
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm2, %zmm0, %k1
+; X64-MIC-AVX-NEXT: kortestw %k0, %k1
+; X64-MIC-AVX-NEXT: sete %al
+; X64-MIC-AVX-NEXT: vzeroupper
+; X64-MIC-AVX-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 31) nounwind
+ %cmp = icmp eq i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length31_lt(i8* %x, i8* %y) nounwind {
+; X86-LABEL: length31_lt:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $31
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: shrl $31, %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: retl
+;
+; X64-LABEL: length31_lt:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl $31, %edx
+; X64-NEXT: callq memcmp
+; X64-NEXT: shrl $31, %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 31) nounwind
+ %cmp = icmp slt i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length31_gt(i8* %x, i8* %y) nounwind {
+; X86-LABEL: length31_gt:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $31
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setg %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length31_gt:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl $31, %edx
+; X64-NEXT: callq memcmp
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setg %al
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 31) nounwind
+ %cmp = icmp sgt i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length31_eq_prefer128(i8* %x, i8* %y) nounwind "prefer-vector-width"="128" {
+; X86-NOSSE-LABEL: length31_eq_prefer128:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $31
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: sete %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: length31_eq_prefer128:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl $0
+; X86-SSE1-NEXT: pushl $31
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: calll memcmp
+; X86-SSE1-NEXT: addl $16, %esp
+; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: sete %al
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: length31_eq_prefer128:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: movdqu 15(%ecx), %xmm1
+; X86-SSE2-NEXT: movdqu (%eax), %xmm2
+; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X86-SSE2-NEXT: movdqu 15(%eax), %xmm0
+; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X86-SSE2-NEXT: pand %xmm2, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: sete %al
+; X86-SSE2-NEXT: retl
+;
+; X86-SSE41-LABEL: length31_eq_prefer128:
+; X86-SSE41: # %bb.0:
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE41-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE41-NEXT: movdqu 15(%ecx), %xmm1
+; X86-SSE41-NEXT: movdqu (%eax), %xmm2
+; X86-SSE41-NEXT: pxor %xmm0, %xmm2
+; X86-SSE41-NEXT: movdqu 15(%eax), %xmm0
+; X86-SSE41-NEXT: pxor %xmm1, %xmm0
+; X86-SSE41-NEXT: por %xmm2, %xmm0
+; X86-SSE41-NEXT: ptest %xmm0, %xmm0
+; X86-SSE41-NEXT: sete %al
+; X86-SSE41-NEXT: retl
+;
+; X64-SSE2-LABEL: length31_eq_prefer128:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu 15(%rdi), %xmm1
+; X64-SSE2-NEXT: movdqu (%rsi), %xmm2
+; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X64-SSE2-NEXT: movdqu 15(%rsi), %xmm0
+; X64-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X64-SSE2-NEXT: pand %xmm2, %xmm0
+; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: retq
+;
+; X64-SSE41-LABEL: length31_eq_prefer128:
+; X64-SSE41: # %bb.0:
+; X64-SSE41-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE41-NEXT: movdqu 15(%rdi), %xmm1
+; X64-SSE41-NEXT: movdqu (%rsi), %xmm2
+; X64-SSE41-NEXT: pxor %xmm0, %xmm2
+; X64-SSE41-NEXT: movdqu 15(%rsi), %xmm0
+; X64-SSE41-NEXT: pxor %xmm1, %xmm0
+; X64-SSE41-NEXT: por %xmm2, %xmm0
+; X64-SSE41-NEXT: ptest %xmm0, %xmm0
+; X64-SSE41-NEXT: sete %al
+; X64-SSE41-NEXT: retq
+;
+; X64-AVX-LABEL: length31_eq_prefer128:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX-NEXT: vmovdqu 15(%rdi), %xmm1
+; X64-AVX-NEXT: vpxor 15(%rsi), %xmm1, %xmm1
+; X64-AVX-NEXT: vpxor (%rsi), %xmm0, %xmm0
+; X64-AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: vptest %xmm0, %xmm0
+; X64-AVX-NEXT: sete %al
+; X64-AVX-NEXT: retq
+;
+; X64-MIC-AVX-LABEL: length31_eq_prefer128:
+; X64-MIC-AVX: # %bb.0:
+; X64-MIC-AVX-NEXT: vmovdqu (%rdi), %xmm0
+; X64-MIC-AVX-NEXT: vmovdqu 15(%rdi), %xmm1
+; X64-MIC-AVX-NEXT: vmovdqu (%rsi), %xmm2
+; X64-MIC-AVX-NEXT: vmovdqu 15(%rsi), %xmm3
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm3, %zmm1, %k0
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm2, %zmm0, %k1
+; X64-MIC-AVX-NEXT: kortestw %k0, %k1
+; X64-MIC-AVX-NEXT: sete %al
+; X64-MIC-AVX-NEXT: vzeroupper
+; X64-MIC-AVX-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 31) nounwind
+ %cmp = icmp eq i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length31_eq_const(i8* %X) nounwind {
+; X86-NOSSE-LABEL: length31_eq_const:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $31
+; X86-NOSSE-NEXT: pushl $.L.str
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: setne %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: length31_eq_const:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl $0
+; X86-SSE1-NEXT: pushl $31
+; X86-SSE1-NEXT: pushl $.L.str
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: calll memcmp
+; X86-SSE1-NEXT: addl $16, %esp
+; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: setne %al
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: length31_eq_const:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movdqu (%eax), %xmm0
+; X86-SSE2-NEXT: movdqu 15(%eax), %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand %xmm1, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: setne %al
+; X86-SSE2-NEXT: retl
+;
+; X86-SSE41-LABEL: length31_eq_const:
+; X86-SSE41: # %bb.0:
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT: movdqu (%eax), %xmm0
+; X86-SSE41-NEXT: movdqu 15(%eax), %xmm1
+; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1
+; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0
+; X86-SSE41-NEXT: por %xmm1, %xmm0
+; X86-SSE41-NEXT: ptest %xmm0, %xmm0
+; X86-SSE41-NEXT: setne %al
+; X86-SSE41-NEXT: retl
+;
+; X64-SSE2-LABEL: length31_eq_const:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu 15(%rdi), %xmm1
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm1
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT: pand %xmm1, %xmm0
+; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: setne %al
+; X64-SSE2-NEXT: retq
+;
+; X64-SSE41-LABEL: length31_eq_const:
+; X64-SSE41: # %bb.0:
+; X64-SSE41-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE41-NEXT: movdqu 15(%rdi), %xmm1
+; X64-SSE41-NEXT: pxor {{.*}}(%rip), %xmm1
+; X64-SSE41-NEXT: pxor {{.*}}(%rip), %xmm0
+; X64-SSE41-NEXT: por %xmm1, %xmm0
+; X64-SSE41-NEXT: ptest %xmm0, %xmm0
+; X64-SSE41-NEXT: setne %al
+; X64-SSE41-NEXT: retq
+;
+; X64-AVX-LABEL: length31_eq_const:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX-NEXT: vmovdqu 15(%rdi), %xmm1
+; X64-AVX-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: vptest %xmm0, %xmm0
+; X64-AVX-NEXT: setne %al
+; X64-AVX-NEXT: retq
+;
+; X64-MIC-AVX-LABEL: length31_eq_const:
+; X64-MIC-AVX: # %bb.0:
+; X64-MIC-AVX-NEXT: vmovdqu (%rdi), %xmm0
+; X64-MIC-AVX-NEXT: vmovdqu 15(%rdi), %xmm1
+; X64-MIC-AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [943142453,842084409,909456435,809056311]
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm2, %zmm1, %k0
+; X64-MIC-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [858927408,926299444,825243960,892613426]
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm1, %zmm0, %k1
+; X64-MIC-AVX-NEXT: kortestw %k0, %k1
+; X64-MIC-AVX-NEXT: setne %al
+; X64-MIC-AVX-NEXT: vzeroupper
+; X64-MIC-AVX-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 31) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length32(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length32:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $32
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length32:
+; X64: # %bb.0:
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq (%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB47_4
+; X64-NEXT: # %bb.1: # %loadbb1
+; X64-NEXT: movq 8(%rdi), %rcx
+; X64-NEXT: movq 8(%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB47_4
+; X64-NEXT: # %bb.2: # %loadbb2
+; X64-NEXT: movq 16(%rdi), %rcx
+; X64-NEXT: movq 16(%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB47_4
+; X64-NEXT: # %bb.3: # %loadbb3
+; X64-NEXT: movq 24(%rdi), %rcx
+; X64-NEXT: movq 24(%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: je .LBB47_5
+; X64-NEXT: .LBB47_4: # %res_block
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: setae %al
+; X64-NEXT: leal -1(%rax,%rax), %eax
+; X64-NEXT: .LBB47_5: # %endblock
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 32) nounwind
+ ret i32 %m
+}
+
+; PR33325 - https://bugs.llvm.org/show_bug.cgi?id=33325
+
+define i1 @length32_eq(i8* %x, i8* %y) nounwind {
+; X86-NOSSE-LABEL: length32_eq:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $32
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: sete %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: length32_eq:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl $0
+; X86-SSE1-NEXT: pushl $32
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: calll memcmp
+; X86-SSE1-NEXT: addl $16, %esp
+; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: sete %al
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: length32_eq:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: movdqu 16(%ecx), %xmm1
+; X86-SSE2-NEXT: movdqu (%eax), %xmm2
+; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X86-SSE2-NEXT: movdqu 16(%eax), %xmm0
+; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X86-SSE2-NEXT: pand %xmm2, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: sete %al
+; X86-SSE2-NEXT: retl
+;
+; X86-SSE41-LABEL: length32_eq:
+; X86-SSE41: # %bb.0:
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE41-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE41-NEXT: movdqu 16(%ecx), %xmm1
+; X86-SSE41-NEXT: movdqu (%eax), %xmm2
+; X86-SSE41-NEXT: pxor %xmm0, %xmm2
+; X86-SSE41-NEXT: movdqu 16(%eax), %xmm0
+; X86-SSE41-NEXT: pxor %xmm1, %xmm0
+; X86-SSE41-NEXT: por %xmm2, %xmm0
+; X86-SSE41-NEXT: ptest %xmm0, %xmm0
+; X86-SSE41-NEXT: sete %al
+; X86-SSE41-NEXT: retl
+;
+; X64-SSE2-LABEL: length32_eq:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE2-NEXT: movdqu (%rsi), %xmm2
+; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X64-SSE2-NEXT: movdqu 16(%rsi), %xmm0
+; X64-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X64-SSE2-NEXT: pand %xmm2, %xmm0
+; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: retq
+;
+; X64-SSE41-LABEL: length32_eq:
+; X64-SSE41: # %bb.0:
+; X64-SSE41-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE41-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE41-NEXT: movdqu (%rsi), %xmm2
+; X64-SSE41-NEXT: pxor %xmm0, %xmm2
+; X64-SSE41-NEXT: movdqu 16(%rsi), %xmm0
+; X64-SSE41-NEXT: pxor %xmm1, %xmm0
+; X64-SSE41-NEXT: por %xmm2, %xmm0
+; X64-SSE41-NEXT: ptest %xmm0, %xmm0
+; X64-SSE41-NEXT: sete %al
+; X64-SSE41-NEXT: retq
+;
+; X64-AVX1-LABEL: length32_eq:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX1-NEXT: vmovdqu 16(%rdi), %xmm1
+; X64-AVX1-NEXT: vpxor 16(%rsi), %xmm1, %xmm1
+; X64-AVX1-NEXT: vpxor (%rsi), %xmm0, %xmm0
+; X64-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT: vptest %xmm0, %xmm0
+; X64-AVX1-NEXT: sete %al
+; X64-AVX1-NEXT: retq
+;
+; X64-AVX2-LABEL: length32_eq:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vpxor (%rsi), %ymm0, %ymm0
+; X64-AVX2-NEXT: vptest %ymm0, %ymm0
+; X64-AVX2-NEXT: sete %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512-LABEL: length32_eq:
+; X64-AVX512: # %bb.0:
+; X64-AVX512-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX512-NEXT: vpxor (%rsi), %ymm0, %ymm0
+; X64-AVX512-NEXT: vptest %ymm0, %ymm0
+; X64-AVX512-NEXT: sete %al
+; X64-AVX512-NEXT: vzeroupper
+; X64-AVX512-NEXT: retq
+;
+; X64-MIC-AVX-LABEL: length32_eq:
+; X64-MIC-AVX: # %bb.0:
+; X64-MIC-AVX-NEXT: vmovdqu (%rdi), %ymm0
+; X64-MIC-AVX-NEXT: vmovdqu (%rsi), %ymm1
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm1, %zmm0, %k0
+; X64-MIC-AVX-NEXT: kortestw %k0, %k0
+; X64-MIC-AVX-NEXT: sete %al
+; X64-MIC-AVX-NEXT: vzeroupper
+; X64-MIC-AVX-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind
+ %cmp = icmp eq i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length32_lt(i8* %x, i8* %y) nounwind {
+; X86-LABEL: length32_lt:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $32
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: shrl $31, %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: retl
+;
+; X64-LABEL: length32_lt:
+; X64: # %bb.0:
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq (%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB49_4
+; X64-NEXT: # %bb.1: # %loadbb1
+; X64-NEXT: movq 8(%rdi), %rcx
+; X64-NEXT: movq 8(%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB49_4
+; X64-NEXT: # %bb.2: # %loadbb2
+; X64-NEXT: movq 16(%rdi), %rcx
+; X64-NEXT: movq 16(%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB49_4
+; X64-NEXT: # %bb.3: # %loadbb3
+; X64-NEXT: movq 24(%rdi), %rcx
+; X64-NEXT: movq 24(%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: je .LBB49_5
+; X64-NEXT: .LBB49_4: # %res_block
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: setae %al
+; X64-NEXT: leal -1(%rax,%rax), %eax
+; X64-NEXT: .LBB49_5: # %endblock
+; X64-NEXT: shrl $31, %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
+; X64-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind
+ %cmp = icmp slt i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length32_gt(i8* %x, i8* %y) nounwind {
+; X86-LABEL: length32_gt:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $32
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setg %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length32_gt:
+; X64: # %bb.0:
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: movq (%rsi), %rcx
+; X64-NEXT: bswapq %rax
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: cmpq %rcx, %rax
+; X64-NEXT: jne .LBB50_4
+; X64-NEXT: # %bb.1: # %loadbb1
+; X64-NEXT: movq 8(%rdi), %rax
+; X64-NEXT: movq 8(%rsi), %rcx
+; X64-NEXT: bswapq %rax
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: cmpq %rcx, %rax
+; X64-NEXT: jne .LBB50_4
+; X64-NEXT: # %bb.2: # %loadbb2
+; X64-NEXT: movq 16(%rdi), %rax
+; X64-NEXT: movq 16(%rsi), %rcx
+; X64-NEXT: bswapq %rax
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: cmpq %rcx, %rax
+; X64-NEXT: jne .LBB50_4
+; X64-NEXT: # %bb.3: # %loadbb3
+; X64-NEXT: movq 24(%rdi), %rax
+; X64-NEXT: movq 24(%rsi), %rcx
+; X64-NEXT: bswapq %rax
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: cmpq %rcx, %rax
+; X64-NEXT: je .LBB50_5
+; X64-NEXT: .LBB50_4: # %res_block
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: cmpq %rcx, %rax
+; X64-NEXT: setae %dl
+; X64-NEXT: leal -1(%rdx,%rdx), %edx
+; X64-NEXT: .LBB50_5: # %endblock
+; X64-NEXT: testl %edx, %edx
+; X64-NEXT: setg %al
+; X64-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind
+ %cmp = icmp sgt i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length32_eq_prefer128(i8* %x, i8* %y) nounwind "prefer-vector-width"="128" {
+; X86-NOSSE-LABEL: length32_eq_prefer128:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $32
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: sete %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: length32_eq_prefer128:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl $0
+; X86-SSE1-NEXT: pushl $32
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: calll memcmp
+; X86-SSE1-NEXT: addl $16, %esp
+; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: sete %al
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: length32_eq_prefer128:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: movdqu 16(%ecx), %xmm1
+; X86-SSE2-NEXT: movdqu (%eax), %xmm2
+; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X86-SSE2-NEXT: movdqu 16(%eax), %xmm0
+; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X86-SSE2-NEXT: pand %xmm2, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: sete %al
+; X86-SSE2-NEXT: retl
+;
+; X86-SSE41-LABEL: length32_eq_prefer128:
+; X86-SSE41: # %bb.0:
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE41-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE41-NEXT: movdqu 16(%ecx), %xmm1
+; X86-SSE41-NEXT: movdqu (%eax), %xmm2
+; X86-SSE41-NEXT: pxor %xmm0, %xmm2
+; X86-SSE41-NEXT: movdqu 16(%eax), %xmm0
+; X86-SSE41-NEXT: pxor %xmm1, %xmm0
+; X86-SSE41-NEXT: por %xmm2, %xmm0
+; X86-SSE41-NEXT: ptest %xmm0, %xmm0
+; X86-SSE41-NEXT: sete %al
+; X86-SSE41-NEXT: retl
+;
+; X64-SSE2-LABEL: length32_eq_prefer128:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE2-NEXT: movdqu (%rsi), %xmm2
+; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X64-SSE2-NEXT: movdqu 16(%rsi), %xmm0
+; X64-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X64-SSE2-NEXT: pand %xmm2, %xmm0
+; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: retq
+;
+; X64-SSE41-LABEL: length32_eq_prefer128:
+; X64-SSE41: # %bb.0:
+; X64-SSE41-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE41-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE41-NEXT: movdqu (%rsi), %xmm2
+; X64-SSE41-NEXT: pxor %xmm0, %xmm2
+; X64-SSE41-NEXT: movdqu 16(%rsi), %xmm0
+; X64-SSE41-NEXT: pxor %xmm1, %xmm0
+; X64-SSE41-NEXT: por %xmm2, %xmm0
+; X64-SSE41-NEXT: ptest %xmm0, %xmm0
+; X64-SSE41-NEXT: sete %al
+; X64-SSE41-NEXT: retq
+;
+; X64-AVX-LABEL: length32_eq_prefer128:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX-NEXT: vmovdqu 16(%rdi), %xmm1
+; X64-AVX-NEXT: vpxor 16(%rsi), %xmm1, %xmm1
+; X64-AVX-NEXT: vpxor (%rsi), %xmm0, %xmm0
+; X64-AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: vptest %xmm0, %xmm0
+; X64-AVX-NEXT: sete %al
+; X64-AVX-NEXT: retq
+;
+; X64-MIC-AVX-LABEL: length32_eq_prefer128:
+; X64-MIC-AVX: # %bb.0:
+; X64-MIC-AVX-NEXT: vmovdqu (%rdi), %xmm0
+; X64-MIC-AVX-NEXT: vmovdqu 16(%rdi), %xmm1
+; X64-MIC-AVX-NEXT: vmovdqu (%rsi), %xmm2
+; X64-MIC-AVX-NEXT: vmovdqu 16(%rsi), %xmm3
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm3, %zmm1, %k0
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm2, %zmm0, %k1
+; X64-MIC-AVX-NEXT: kortestw %k0, %k1
+; X64-MIC-AVX-NEXT: sete %al
+; X64-MIC-AVX-NEXT: vzeroupper
+; X64-MIC-AVX-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind
+ %cmp = icmp eq i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length32_eq_const(i8* %X) nounwind {
+; X86-NOSSE-LABEL: length32_eq_const:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $32
+; X86-NOSSE-NEXT: pushl $.L.str
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: setne %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: length32_eq_const:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl $0
+; X86-SSE1-NEXT: pushl $32
+; X86-SSE1-NEXT: pushl $.L.str
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: calll memcmp
+; X86-SSE1-NEXT: addl $16, %esp
+; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: setne %al
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: length32_eq_const:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movdqu (%eax), %xmm0
+; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand %xmm1, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: setne %al
+; X86-SSE2-NEXT: retl
+;
+; X86-SSE41-LABEL: length32_eq_const:
+; X86-SSE41: # %bb.0:
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT: movdqu (%eax), %xmm0
+; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1
+; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1
+; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0
+; X86-SSE41-NEXT: por %xmm1, %xmm0
+; X86-SSE41-NEXT: ptest %xmm0, %xmm0
+; X86-SSE41-NEXT: setne %al
+; X86-SSE41-NEXT: retl
+;
+; X64-SSE2-LABEL: length32_eq_const:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm1
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT: pand %xmm1, %xmm0
+; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: setne %al
+; X64-SSE2-NEXT: retq
+;
+; X64-SSE41-LABEL: length32_eq_const:
+; X64-SSE41: # %bb.0:
+; X64-SSE41-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE41-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE41-NEXT: pxor {{.*}}(%rip), %xmm1
+; X64-SSE41-NEXT: pxor {{.*}}(%rip), %xmm0
+; X64-SSE41-NEXT: por %xmm1, %xmm0
+; X64-SSE41-NEXT: ptest %xmm0, %xmm0
+; X64-SSE41-NEXT: setne %al
+; X64-SSE41-NEXT: retq
+;
+; X64-AVX1-LABEL: length32_eq_const:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX1-NEXT: vmovdqu 16(%rdi), %xmm1
+; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT: vptest %xmm0, %xmm0
+; X64-AVX1-NEXT: setne %al
+; X64-AVX1-NEXT: retq
+;
+; X64-AVX2-LABEL: length32_eq_const:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT: vptest %ymm0, %ymm0
+; X64-AVX2-NEXT: setne %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512-LABEL: length32_eq_const:
+; X64-AVX512: # %bb.0:
+; X64-AVX512-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX512-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX512-NEXT: vptest %ymm0, %ymm0
+; X64-AVX512-NEXT: setne %al
+; X64-AVX512-NEXT: vzeroupper
+; X64-AVX512-NEXT: retq
+;
+; X64-MIC-AVX-LABEL: length32_eq_const:
+; X64-MIC-AVX: # %bb.0:
+; X64-MIC-AVX-NEXT: vmovdqu (%rdi), %ymm0
+; X64-MIC-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [858927408,926299444,825243960,892613426,959985462,858927408,926299444,825243960]
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm1, %zmm0, %k0
+; X64-MIC-AVX-NEXT: kortestw %k0, %k0
+; X64-MIC-AVX-NEXT: setne %al
+; X64-MIC-AVX-NEXT: vzeroupper
+; X64-MIC-AVX-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 32) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length48(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length48:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $48
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length48:
+; X64: # %bb.0:
+; X64-NEXT: movl $48, %edx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 48) nounwind
+ ret i32 %m
+}
+
+define i1 @length48_eq(i8* %x, i8* %y) nounwind {
+; X86-NOSSE-LABEL: length48_eq:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $48
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: sete %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: length48_eq:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl $0
+; X86-SSE1-NEXT: pushl $48
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: calll memcmp
+; X86-SSE1-NEXT: addl $16, %esp
+; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: sete %al
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: length48_eq:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: movdqu 16(%ecx), %xmm1
+; X86-SSE2-NEXT: movdqu (%eax), %xmm2
+; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X86-SSE2-NEXT: movdqu 16(%eax), %xmm0
+; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X86-SSE2-NEXT: movdqu 32(%ecx), %xmm1
+; X86-SSE2-NEXT: movdqu 32(%eax), %xmm3
+; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm3
+; X86-SSE2-NEXT: pand %xmm0, %xmm3
+; X86-SSE2-NEXT: pand %xmm2, %xmm3
+; X86-SSE2-NEXT: pmovmskb %xmm3, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: sete %al
+; X86-SSE2-NEXT: retl
+;
+; X86-SSE41-LABEL: length48_eq:
+; X86-SSE41: # %bb.0:
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE41-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE41-NEXT: movdqu 16(%ecx), %xmm1
+; X86-SSE41-NEXT: movdqu (%eax), %xmm2
+; X86-SSE41-NEXT: pxor %xmm0, %xmm2
+; X86-SSE41-NEXT: movdqu 16(%eax), %xmm0
+; X86-SSE41-NEXT: pxor %xmm1, %xmm0
+; X86-SSE41-NEXT: movdqu 32(%ecx), %xmm1
+; X86-SSE41-NEXT: movdqu 32(%eax), %xmm3
+; X86-SSE41-NEXT: pxor %xmm1, %xmm3
+; X86-SSE41-NEXT: por %xmm0, %xmm3
+; X86-SSE41-NEXT: por %xmm2, %xmm3
+; X86-SSE41-NEXT: ptest %xmm3, %xmm3
+; X86-SSE41-NEXT: sete %al
+; X86-SSE41-NEXT: retl
+;
+; X64-SSE2-LABEL: length48_eq:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE2-NEXT: movdqu 32(%rdi), %xmm2
+; X64-SSE2-NEXT: movdqu (%rsi), %xmm3
+; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm3
+; X64-SSE2-NEXT: movdqu 16(%rsi), %xmm0
+; X64-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X64-SSE2-NEXT: movdqu 32(%rsi), %xmm1
+; X64-SSE2-NEXT: pcmpeqb %xmm2, %xmm1
+; X64-SSE2-NEXT: pand %xmm0, %xmm1
+; X64-SSE2-NEXT: pand %xmm3, %xmm1
+; X64-SSE2-NEXT: pmovmskb %xmm1, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: retq
+;
+; X64-SSE41-LABEL: length48_eq:
+; X64-SSE41: # %bb.0:
+; X64-SSE41-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE41-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE41-NEXT: movdqu 32(%rdi), %xmm2
+; X64-SSE41-NEXT: movdqu (%rsi), %xmm3
+; X64-SSE41-NEXT: pxor %xmm0, %xmm3
+; X64-SSE41-NEXT: movdqu 16(%rsi), %xmm0
+; X64-SSE41-NEXT: pxor %xmm1, %xmm0
+; X64-SSE41-NEXT: movdqu 32(%rsi), %xmm1
+; X64-SSE41-NEXT: pxor %xmm2, %xmm1
+; X64-SSE41-NEXT: por %xmm0, %xmm1
+; X64-SSE41-NEXT: por %xmm3, %xmm1
+; X64-SSE41-NEXT: ptest %xmm1, %xmm1
+; X64-SSE41-NEXT: sete %al
+; X64-SSE41-NEXT: retq
+;
+; X64-AVX1-LABEL: length48_eq:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX1-NEXT: vmovdqu 16(%rdi), %xmm1
+; X64-AVX1-NEXT: vmovdqu 32(%rdi), %xmm2
+; X64-AVX1-NEXT: vpxor 16(%rsi), %xmm1, %xmm1
+; X64-AVX1-NEXT: vpxor (%rsi), %xmm0, %xmm0
+; X64-AVX1-NEXT: vpxor 32(%rsi), %xmm2, %xmm2
+; X64-AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
+; X64-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT: vptest %xmm0, %xmm0
+; X64-AVX1-NEXT: sete %al
+; X64-AVX1-NEXT: retq
+;
+; X64-AVX2-LABEL: length48_eq:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vmovdqu 32(%rdi), %xmm1
+; X64-AVX2-NEXT: vmovdqu 32(%rsi), %xmm2
+; X64-AVX2-NEXT: vpxor (%rsi), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
+; X64-AVX2-NEXT: vptest %ymm0, %ymm0
+; X64-AVX2-NEXT: sete %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512-LABEL: length48_eq:
+; X64-AVX512: # %bb.0:
+; X64-AVX512-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX512-NEXT: vmovdqu 32(%rdi), %xmm1
+; X64-AVX512-NEXT: vmovdqu 32(%rsi), %xmm2
+; X64-AVX512-NEXT: vpxor (%rsi), %ymm0, %ymm0
+; X64-AVX512-NEXT: vpxor %ymm2, %ymm1, %ymm1
+; X64-AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; X64-AVX512-NEXT: vptest %ymm0, %ymm0
+; X64-AVX512-NEXT: sete %al
+; X64-AVX512-NEXT: vzeroupper
+; X64-AVX512-NEXT: retq
+;
+; X64-MIC-AVX-LABEL: length48_eq:
+; X64-MIC-AVX: # %bb.0:
+; X64-MIC-AVX-NEXT: vmovdqu (%rdi), %ymm0
+; X64-MIC-AVX-NEXT: vmovdqu (%rsi), %ymm1
+; X64-MIC-AVX-NEXT: vmovdqu 32(%rdi), %xmm2
+; X64-MIC-AVX-NEXT: vmovdqu 32(%rsi), %xmm3
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm3, %zmm2, %k0
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm1, %zmm0, %k1
+; X64-MIC-AVX-NEXT: kortestw %k0, %k1
+; X64-MIC-AVX-NEXT: sete %al
+; X64-MIC-AVX-NEXT: vzeroupper
+; X64-MIC-AVX-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 48) nounwind
+ %cmp = icmp eq i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length48_lt(i8* %x, i8* %y) nounwind {
+; X86-LABEL: length48_lt:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $48
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: shrl $31, %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: retl
+;
+; X64-LABEL: length48_lt:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl $48, %edx
+; X64-NEXT: callq memcmp
+; X64-NEXT: shrl $31, %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 48) nounwind
+ %cmp = icmp slt i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length48_gt(i8* %x, i8* %y) nounwind {
+; X86-LABEL: length48_gt:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $48
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setg %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length48_gt:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl $48, %edx
+; X64-NEXT: callq memcmp
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setg %al
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 48) nounwind
+ %cmp = icmp sgt i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length48_eq_prefer128(i8* %x, i8* %y) nounwind "prefer-vector-width"="128" {
+; X86-NOSSE-LABEL: length48_eq_prefer128:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $48
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: sete %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: length48_eq_prefer128:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl $0
+; X86-SSE1-NEXT: pushl $48
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: calll memcmp
+; X86-SSE1-NEXT: addl $16, %esp
+; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: sete %al
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: length48_eq_prefer128:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: movdqu 16(%ecx), %xmm1
+; X86-SSE2-NEXT: movdqu (%eax), %xmm2
+; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X86-SSE2-NEXT: movdqu 16(%eax), %xmm0
+; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X86-SSE2-NEXT: movdqu 32(%ecx), %xmm1
+; X86-SSE2-NEXT: movdqu 32(%eax), %xmm3
+; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm3
+; X86-SSE2-NEXT: pand %xmm0, %xmm3
+; X86-SSE2-NEXT: pand %xmm2, %xmm3
+; X86-SSE2-NEXT: pmovmskb %xmm3, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: sete %al
+; X86-SSE2-NEXT: retl
+;
+; X86-SSE41-LABEL: length48_eq_prefer128:
+; X86-SSE41: # %bb.0:
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE41-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE41-NEXT: movdqu 16(%ecx), %xmm1
+; X86-SSE41-NEXT: movdqu (%eax), %xmm2
+; X86-SSE41-NEXT: pxor %xmm0, %xmm2
+; X86-SSE41-NEXT: movdqu 16(%eax), %xmm0
+; X86-SSE41-NEXT: pxor %xmm1, %xmm0
+; X86-SSE41-NEXT: movdqu 32(%ecx), %xmm1
+; X86-SSE41-NEXT: movdqu 32(%eax), %xmm3
+; X86-SSE41-NEXT: pxor %xmm1, %xmm3
+; X86-SSE41-NEXT: por %xmm0, %xmm3
+; X86-SSE41-NEXT: por %xmm2, %xmm3
+; X86-SSE41-NEXT: ptest %xmm3, %xmm3
+; X86-SSE41-NEXT: sete %al
+; X86-SSE41-NEXT: retl
+;
+; X64-SSE2-LABEL: length48_eq_prefer128:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE2-NEXT: movdqu 32(%rdi), %xmm2
+; X64-SSE2-NEXT: movdqu (%rsi), %xmm3
+; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm3
+; X64-SSE2-NEXT: movdqu 16(%rsi), %xmm0
+; X64-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X64-SSE2-NEXT: movdqu 32(%rsi), %xmm1
+; X64-SSE2-NEXT: pcmpeqb %xmm2, %xmm1
+; X64-SSE2-NEXT: pand %xmm0, %xmm1
+; X64-SSE2-NEXT: pand %xmm3, %xmm1
+; X64-SSE2-NEXT: pmovmskb %xmm1, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: retq
+;
+; X64-SSE41-LABEL: length48_eq_prefer128:
+; X64-SSE41: # %bb.0:
+; X64-SSE41-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE41-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE41-NEXT: movdqu 32(%rdi), %xmm2
+; X64-SSE41-NEXT: movdqu (%rsi), %xmm3
+; X64-SSE41-NEXT: pxor %xmm0, %xmm3
+; X64-SSE41-NEXT: movdqu 16(%rsi), %xmm0
+; X64-SSE41-NEXT: pxor %xmm1, %xmm0
+; X64-SSE41-NEXT: movdqu 32(%rsi), %xmm1
+; X64-SSE41-NEXT: pxor %xmm2, %xmm1
+; X64-SSE41-NEXT: por %xmm0, %xmm1
+; X64-SSE41-NEXT: por %xmm3, %xmm1
+; X64-SSE41-NEXT: ptest %xmm1, %xmm1
+; X64-SSE41-NEXT: sete %al
+; X64-SSE41-NEXT: retq
+;
+; X64-AVX-LABEL: length48_eq_prefer128:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX-NEXT: vmovdqu 16(%rdi), %xmm1
+; X64-AVX-NEXT: vmovdqu 32(%rdi), %xmm2
+; X64-AVX-NEXT: vpxor 16(%rsi), %xmm1, %xmm1
+; X64-AVX-NEXT: vpxor (%rsi), %xmm0, %xmm0
+; X64-AVX-NEXT: vpxor 32(%rsi), %xmm2, %xmm2
+; X64-AVX-NEXT: vpor %xmm2, %xmm1, %xmm1
+; X64-AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: vptest %xmm0, %xmm0
+; X64-AVX-NEXT: sete %al
+; X64-AVX-NEXT: retq
+;
+; X64-MIC-AVX-LABEL: length48_eq_prefer128:
+; X64-MIC-AVX: # %bb.0:
+; X64-MIC-AVX-NEXT: vmovdqu (%rdi), %xmm0
+; X64-MIC-AVX-NEXT: vmovdqu 16(%rdi), %xmm1
+; X64-MIC-AVX-NEXT: vmovdqu 32(%rdi), %xmm2
+; X64-MIC-AVX-NEXT: vmovdqu (%rsi), %xmm3
+; X64-MIC-AVX-NEXT: vmovdqu 16(%rsi), %xmm4
+; X64-MIC-AVX-NEXT: vmovdqu 32(%rsi), %xmm5
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm4, %zmm1, %k0
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm3, %zmm0, %k1
+; X64-MIC-AVX-NEXT: korw %k0, %k1, %k0
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm5, %zmm2, %k1
+; X64-MIC-AVX-NEXT: kortestw %k1, %k0
+; X64-MIC-AVX-NEXT: sete %al
+; X64-MIC-AVX-NEXT: vzeroupper
+; X64-MIC-AVX-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 48) nounwind
+ %cmp = icmp eq i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length48_eq_const(i8* %X) nounwind {
+; X86-NOSSE-LABEL: length48_eq_const:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $48
+; X86-NOSSE-NEXT: pushl $.L.str
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: setne %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: length48_eq_const:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl $0
+; X86-SSE1-NEXT: pushl $48
+; X86-SSE1-NEXT: pushl $.L.str
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: calll memcmp
+; X86-SSE1-NEXT: addl $16, %esp
+; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: setne %al
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: length48_eq_const:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movdqu (%eax), %xmm0
+; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
+; X86-SSE2-NEXT: movdqu 32(%eax), %xmm2
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand %xmm1, %xmm2
+; X86-SSE2-NEXT: pand %xmm0, %xmm2
+; X86-SSE2-NEXT: pmovmskb %xmm2, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: setne %al
+; X86-SSE2-NEXT: retl
+;
+; X86-SSE41-LABEL: length48_eq_const:
+; X86-SSE41: # %bb.0:
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT: movdqu (%eax), %xmm0
+; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1
+; X86-SSE41-NEXT: movdqu 32(%eax), %xmm2
+; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1
+; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0
+; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm2
+; X86-SSE41-NEXT: por %xmm1, %xmm2
+; X86-SSE41-NEXT: por %xmm0, %xmm2
+; X86-SSE41-NEXT: ptest %xmm2, %xmm2
+; X86-SSE41-NEXT: setne %al
+; X86-SSE41-NEXT: retl
+;
+; X64-SSE2-LABEL: length48_eq_const:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE2-NEXT: movdqu 32(%rdi), %xmm2
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm1
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm2
+; X64-SSE2-NEXT: pand %xmm1, %xmm2
+; X64-SSE2-NEXT: pand %xmm0, %xmm2
+; X64-SSE2-NEXT: pmovmskb %xmm2, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: setne %al
+; X64-SSE2-NEXT: retq
+;
+; X64-SSE41-LABEL: length48_eq_const:
+; X64-SSE41: # %bb.0:
+; X64-SSE41-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE41-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE41-NEXT: movdqu 32(%rdi), %xmm2
+; X64-SSE41-NEXT: pxor {{.*}}(%rip), %xmm1
+; X64-SSE41-NEXT: pxor {{.*}}(%rip), %xmm0
+; X64-SSE41-NEXT: pxor {{.*}}(%rip), %xmm2
+; X64-SSE41-NEXT: por %xmm1, %xmm2
+; X64-SSE41-NEXT: por %xmm0, %xmm2
+; X64-SSE41-NEXT: ptest %xmm2, %xmm2
+; X64-SSE41-NEXT: setne %al
+; X64-SSE41-NEXT: retq
+;
+; X64-AVX1-LABEL: length48_eq_const:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX1-NEXT: vmovdqu 16(%rdi), %xmm1
+; X64-AVX1-NEXT: vmovdqu 32(%rdi), %xmm2
+; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm2, %xmm2
+; X64-AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
+; X64-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT: vptest %xmm0, %xmm0
+; X64-AVX1-NEXT: setne %al
+; X64-AVX1-NEXT: retq
+;
+; X64-AVX2-LABEL: length48_eq_const:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vmovdqu 32(%rdi), %xmm1
+; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm1, %ymm1
+; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
+; X64-AVX2-NEXT: vptest %ymm0, %ymm0
+; X64-AVX2-NEXT: setne %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512-LABEL: length48_eq_const:
+; X64-AVX512: # %bb.0:
+; X64-AVX512-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX512-NEXT: vmovdqu 32(%rdi), %xmm1
+; X64-AVX512-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX512-NEXT: vpxor {{.*}}(%rip), %ymm1, %ymm1
+; X64-AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; X64-AVX512-NEXT: vptest %ymm0, %ymm0
+; X64-AVX512-NEXT: setne %al
+; X64-AVX512-NEXT: vzeroupper
+; X64-AVX512-NEXT: retq
+;
+; X64-MIC-AVX-LABEL: length48_eq_const:
+; X64-MIC-AVX: # %bb.0:
+; X64-MIC-AVX-NEXT: vmovdqu (%rdi), %ymm0
+; X64-MIC-AVX-NEXT: vmovdqu 32(%rdi), %xmm1
+; X64-MIC-AVX-NEXT: vmovdqa {{.*#+}} ymm2 = [892613426,959985462,858927408,926299444,0,0,0,0]
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm2, %zmm1, %k0
+; X64-MIC-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [858927408,926299444,825243960,892613426,959985462,858927408,926299444,825243960]
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm1, %zmm0, %k1
+; X64-MIC-AVX-NEXT: kortestw %k0, %k1
+; X64-MIC-AVX-NEXT: setne %al
+; X64-MIC-AVX-NEXT: vzeroupper
+; X64-MIC-AVX-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 48) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length63(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length63:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $63
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length63:
+; X64: # %bb.0:
+; X64-NEXT: movl $63, %edx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 63) nounwind
+ ret i32 %m
+}
+
+define i1 @length63_eq(i8* %x, i8* %y) nounwind {
+; X86-NOSSE-LABEL: length63_eq:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $63
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: setne %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: length63_eq:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl $0
+; X86-SSE1-NEXT: pushl $63
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: calll memcmp
+; X86-SSE1-NEXT: addl $16, %esp
+; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: setne %al
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: length63_eq:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: movdqu 16(%ecx), %xmm1
+; X86-SSE2-NEXT: movdqu (%eax), %xmm2
+; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X86-SSE2-NEXT: movdqu 16(%eax), %xmm0
+; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X86-SSE2-NEXT: movdqu 32(%ecx), %xmm1
+; X86-SSE2-NEXT: movdqu 32(%eax), %xmm3
+; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm3
+; X86-SSE2-NEXT: movdqu 47(%ecx), %xmm1
+; X86-SSE2-NEXT: movdqu 47(%eax), %xmm4
+; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm4
+; X86-SSE2-NEXT: pand %xmm3, %xmm4
+; X86-SSE2-NEXT: pand %xmm0, %xmm4
+; X86-SSE2-NEXT: pand %xmm2, %xmm4
+; X86-SSE2-NEXT: pmovmskb %xmm4, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: setne %al
+; X86-SSE2-NEXT: retl
+;
+; X86-SSE41-LABEL: length63_eq:
+; X86-SSE41: # %bb.0:
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE41-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE41-NEXT: movdqu 16(%ecx), %xmm1
+; X86-SSE41-NEXT: movdqu (%eax), %xmm2
+; X86-SSE41-NEXT: pxor %xmm0, %xmm2
+; X86-SSE41-NEXT: movdqu 16(%eax), %xmm0
+; X86-SSE41-NEXT: pxor %xmm1, %xmm0
+; X86-SSE41-NEXT: movdqu 32(%ecx), %xmm1
+; X86-SSE41-NEXT: movdqu 32(%eax), %xmm3
+; X86-SSE41-NEXT: pxor %xmm1, %xmm3
+; X86-SSE41-NEXT: movdqu 47(%ecx), %xmm1
+; X86-SSE41-NEXT: movdqu 47(%eax), %xmm4
+; X86-SSE41-NEXT: pxor %xmm1, %xmm4
+; X86-SSE41-NEXT: por %xmm3, %xmm4
+; X86-SSE41-NEXT: por %xmm0, %xmm4
+; X86-SSE41-NEXT: por %xmm2, %xmm4
+; X86-SSE41-NEXT: ptest %xmm4, %xmm4
+; X86-SSE41-NEXT: setne %al
+; X86-SSE41-NEXT: retl
+;
+; X64-SSE2-LABEL: length63_eq:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE2-NEXT: movdqu 32(%rdi), %xmm2
+; X64-SSE2-NEXT: movdqu 47(%rdi), %xmm3
+; X64-SSE2-NEXT: movdqu (%rsi), %xmm4
+; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm4
+; X64-SSE2-NEXT: movdqu 16(%rsi), %xmm0
+; X64-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X64-SSE2-NEXT: movdqu 32(%rsi), %xmm1
+; X64-SSE2-NEXT: pcmpeqb %xmm2, %xmm1
+; X64-SSE2-NEXT: movdqu 47(%rsi), %xmm2
+; X64-SSE2-NEXT: pcmpeqb %xmm3, %xmm2
+; X64-SSE2-NEXT: pand %xmm1, %xmm2
+; X64-SSE2-NEXT: pand %xmm0, %xmm2
+; X64-SSE2-NEXT: pand %xmm4, %xmm2
+; X64-SSE2-NEXT: pmovmskb %xmm2, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: setne %al
+; X64-SSE2-NEXT: retq
+;
+; X64-SSE41-LABEL: length63_eq:
+; X64-SSE41: # %bb.0:
+; X64-SSE41-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE41-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE41-NEXT: movdqu 32(%rdi), %xmm2
+; X64-SSE41-NEXT: movdqu 47(%rdi), %xmm3
+; X64-SSE41-NEXT: movdqu (%rsi), %xmm4
+; X64-SSE41-NEXT: pxor %xmm0, %xmm4
+; X64-SSE41-NEXT: movdqu 16(%rsi), %xmm0
+; X64-SSE41-NEXT: pxor %xmm1, %xmm0
+; X64-SSE41-NEXT: movdqu 32(%rsi), %xmm1
+; X64-SSE41-NEXT: pxor %xmm2, %xmm1
+; X64-SSE41-NEXT: movdqu 47(%rsi), %xmm2
+; X64-SSE41-NEXT: pxor %xmm3, %xmm2
+; X64-SSE41-NEXT: por %xmm1, %xmm2
+; X64-SSE41-NEXT: por %xmm0, %xmm2
+; X64-SSE41-NEXT: por %xmm4, %xmm2
+; X64-SSE41-NEXT: ptest %xmm2, %xmm2
+; X64-SSE41-NEXT: setne %al
+; X64-SSE41-NEXT: retq
+;
+; X64-AVX1-LABEL: length63_eq:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX1-NEXT: vmovdqu 16(%rdi), %xmm1
+; X64-AVX1-NEXT: vmovdqu 32(%rdi), %xmm2
+; X64-AVX1-NEXT: vmovdqu 47(%rdi), %xmm3
+; X64-AVX1-NEXT: vpxor 47(%rsi), %xmm3, %xmm3
+; X64-AVX1-NEXT: vpxor 32(%rsi), %xmm2, %xmm2
+; X64-AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
+; X64-AVX1-NEXT: vpxor 16(%rsi), %xmm1, %xmm1
+; X64-AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
+; X64-AVX1-NEXT: vpxor (%rsi), %xmm0, %xmm0
+; X64-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT: vptest %xmm0, %xmm0
+; X64-AVX1-NEXT: setne %al
+; X64-AVX1-NEXT: retq
+;
+; X64-AVX2-LABEL: length63_eq:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vmovdqu 31(%rdi), %ymm1
+; X64-AVX2-NEXT: vpxor 31(%rsi), %ymm1, %ymm1
+; X64-AVX2-NEXT: vpxor (%rsi), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
+; X64-AVX2-NEXT: vptest %ymm0, %ymm0
+; X64-AVX2-NEXT: setne %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512-LABEL: length63_eq:
+; X64-AVX512: # %bb.0:
+; X64-AVX512-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX512-NEXT: vmovdqu 31(%rdi), %ymm1
+; X64-AVX512-NEXT: vpxor 31(%rsi), %ymm1, %ymm1
+; X64-AVX512-NEXT: vpxor (%rsi), %ymm0, %ymm0
+; X64-AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; X64-AVX512-NEXT: vptest %ymm0, %ymm0
+; X64-AVX512-NEXT: setne %al
+; X64-AVX512-NEXT: vzeroupper
+; X64-AVX512-NEXT: retq
+;
+; X64-MIC-AVX-LABEL: length63_eq:
+; X64-MIC-AVX: # %bb.0:
+; X64-MIC-AVX-NEXT: vmovdqu (%rdi), %ymm0
+; X64-MIC-AVX-NEXT: vmovdqu 31(%rdi), %ymm1
+; X64-MIC-AVX-NEXT: vmovdqu (%rsi), %ymm2
+; X64-MIC-AVX-NEXT: vmovdqu 31(%rsi), %ymm3
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm3, %zmm1, %k0
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm2, %zmm0, %k1
+; X64-MIC-AVX-NEXT: kortestw %k0, %k1
+; X64-MIC-AVX-NEXT: setne %al
+; X64-MIC-AVX-NEXT: vzeroupper
+; X64-MIC-AVX-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 63) nounwind
+ %cmp = icmp ne i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length63_lt(i8* %x, i8* %y) nounwind {
+; X86-LABEL: length63_lt:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $63
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: shrl $31, %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: retl
+;
+; X64-LABEL: length63_lt:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl $63, %edx
+; X64-NEXT: callq memcmp
+; X64-NEXT: shrl $31, %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 63) nounwind
+ %cmp = icmp slt i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length63_gt(i8* %x, i8* %y) nounwind {
+; X86-LABEL: length63_gt:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $63
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setg %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length63_gt:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl $63, %edx
+; X64-NEXT: callq memcmp
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setg %al
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 63) nounwind
+ %cmp = icmp sgt i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length63_eq_const(i8* %X) nounwind {
+; X86-NOSSE-LABEL: length63_eq_const:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $63
+; X86-NOSSE-NEXT: pushl $.L.str
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: sete %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: length63_eq_const:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl $0
+; X86-SSE1-NEXT: pushl $63
+; X86-SSE1-NEXT: pushl $.L.str
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: calll memcmp
+; X86-SSE1-NEXT: addl $16, %esp
+; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: sete %al
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: length63_eq_const:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movdqu (%eax), %xmm0
+; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
+; X86-SSE2-NEXT: movdqu 32(%eax), %xmm2
+; X86-SSE2-NEXT: movdqu 47(%eax), %xmm3
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm3
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand %xmm3, %xmm2
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand %xmm2, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand %xmm1, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: sete %al
+; X86-SSE2-NEXT: retl
+;
+; X86-SSE41-LABEL: length63_eq_const:
+; X86-SSE41: # %bb.0:
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT: movdqu (%eax), %xmm0
+; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1
+; X86-SSE41-NEXT: movdqu 32(%eax), %xmm2
+; X86-SSE41-NEXT: movdqu 47(%eax), %xmm3
+; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm3
+; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm2
+; X86-SSE41-NEXT: por %xmm3, %xmm2
+; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1
+; X86-SSE41-NEXT: por %xmm2, %xmm1
+; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0
+; X86-SSE41-NEXT: por %xmm1, %xmm0
+; X86-SSE41-NEXT: ptest %xmm0, %xmm0
+; X86-SSE41-NEXT: sete %al
+; X86-SSE41-NEXT: retl
+;
+; X64-SSE2-LABEL: length63_eq_const:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE2-NEXT: movdqu 32(%rdi), %xmm2
+; X64-SSE2-NEXT: movdqu 47(%rdi), %xmm3
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm3
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm2
+; X64-SSE2-NEXT: pand %xmm3, %xmm2
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm1
+; X64-SSE2-NEXT: pand %xmm2, %xmm1
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT: pand %xmm1, %xmm0
+; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: retq
+;
+; X64-SSE41-LABEL: length63_eq_const:
+; X64-SSE41: # %bb.0:
+; X64-SSE41-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE41-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE41-NEXT: movdqu 32(%rdi), %xmm2
+; X64-SSE41-NEXT: movdqu 47(%rdi), %xmm3
+; X64-SSE41-NEXT: pxor {{.*}}(%rip), %xmm3
+; X64-SSE41-NEXT: pxor {{.*}}(%rip), %xmm2
+; X64-SSE41-NEXT: por %xmm3, %xmm2
+; X64-SSE41-NEXT: pxor {{.*}}(%rip), %xmm1
+; X64-SSE41-NEXT: por %xmm2, %xmm1
+; X64-SSE41-NEXT: pxor {{.*}}(%rip), %xmm0
+; X64-SSE41-NEXT: por %xmm1, %xmm0
+; X64-SSE41-NEXT: ptest %xmm0, %xmm0
+; X64-SSE41-NEXT: sete %al
+; X64-SSE41-NEXT: retq
+;
+; X64-AVX1-LABEL: length63_eq_const:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX1-NEXT: vmovdqu 16(%rdi), %xmm1
+; X64-AVX1-NEXT: vmovdqu 32(%rdi), %xmm2
+; X64-AVX1-NEXT: vmovdqu 47(%rdi), %xmm3
+; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm3, %xmm3
+; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm2, %xmm2
+; X64-AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
+; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
+; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT: vptest %xmm0, %xmm0
+; X64-AVX1-NEXT: sete %al
+; X64-AVX1-NEXT: retq
+;
+; X64-AVX2-LABEL: length63_eq_const:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vmovdqu 31(%rdi), %ymm1
+; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm1, %ymm1
+; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
+; X64-AVX2-NEXT: vptest %ymm0, %ymm0
+; X64-AVX2-NEXT: sete %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+;
+; X64-AVX512-LABEL: length63_eq_const:
+; X64-AVX512: # %bb.0:
+; X64-AVX512-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX512-NEXT: vmovdqu 31(%rdi), %ymm1
+; X64-AVX512-NEXT: vpxor {{.*}}(%rip), %ymm1, %ymm1
+; X64-AVX512-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
+; X64-AVX512-NEXT: vptest %ymm0, %ymm0
+; X64-AVX512-NEXT: sete %al
+; X64-AVX512-NEXT: vzeroupper
+; X64-AVX512-NEXT: retq
+;
+; X64-MIC-AVX-LABEL: length63_eq_const:
+; X64-MIC-AVX: # %bb.0:
+; X64-MIC-AVX-NEXT: vmovdqu (%rdi), %ymm0
+; X64-MIC-AVX-NEXT: vmovdqu 31(%rdi), %ymm1
+; X64-MIC-AVX-NEXT: vmovdqa {{.*#+}} ymm2 = [875770417,943142453,842084409,909456435,809056311,875770417,943142453,842084409]
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm2, %zmm1, %k0
+; X64-MIC-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [858927408,926299444,825243960,892613426,959985462,858927408,926299444,825243960]
+; X64-MIC-AVX-NEXT: vpcmpneqd %zmm1, %zmm0, %k1
+; X64-MIC-AVX-NEXT: kortestw %k0, %k1
+; X64-MIC-AVX-NEXT: sete %al
+; X64-MIC-AVX-NEXT: vzeroupper
+; X64-MIC-AVX-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 63) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length64(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length64:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $64
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length64:
+; X64: # %bb.0:
+; X64-NEXT: movl $64, %edx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 64) nounwind
+ ret i32 %m
+}
+
+define i1 @length64_eq(i8* %x, i8* %y) nounwind {
+; X86-NOSSE-LABEL: length64_eq:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $64
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: setne %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: length64_eq:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl $0
+; X86-SSE1-NEXT: pushl $64
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: calll memcmp
+; X86-SSE1-NEXT: addl $16, %esp
+; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: setne %al
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: length64_eq:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: movdqu 16(%ecx), %xmm1
+; X86-SSE2-NEXT: movdqu (%eax), %xmm2
+; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X86-SSE2-NEXT: movdqu 16(%eax), %xmm0
+; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X86-SSE2-NEXT: movdqu 32(%ecx), %xmm1
+; X86-SSE2-NEXT: movdqu 32(%eax), %xmm3
+; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm3
+; X86-SSE2-NEXT: movdqu 48(%ecx), %xmm1
+; X86-SSE2-NEXT: movdqu 48(%eax), %xmm4
+; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm4
+; X86-SSE2-NEXT: pand %xmm3, %xmm4
+; X86-SSE2-NEXT: pand %xmm0, %xmm4
+; X86-SSE2-NEXT: pand %xmm2, %xmm4
+; X86-SSE2-NEXT: pmovmskb %xmm4, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: setne %al
+; X86-SSE2-NEXT: retl
+;
+; X86-SSE41-LABEL: length64_eq:
+; X86-SSE41: # %bb.0:
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE41-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE41-NEXT: movdqu 16(%ecx), %xmm1
+; X86-SSE41-NEXT: movdqu (%eax), %xmm2
+; X86-SSE41-NEXT: pxor %xmm0, %xmm2
+; X86-SSE41-NEXT: movdqu 16(%eax), %xmm0
movdqu 16(%eax), %xmm0 +; X86-SSE41-NEXT: pxor %xmm1, %xmm0 +; X86-SSE41-NEXT: movdqu 32(%ecx), %xmm1 +; X86-SSE41-NEXT: movdqu 32(%eax), %xmm3 +; X86-SSE41-NEXT: pxor %xmm1, %xmm3 +; X86-SSE41-NEXT: movdqu 48(%ecx), %xmm1 +; X86-SSE41-NEXT: movdqu 48(%eax), %xmm4 +; X86-SSE41-NEXT: pxor %xmm1, %xmm4 +; X86-SSE41-NEXT: por %xmm3, %xmm4 +; X86-SSE41-NEXT: por %xmm0, %xmm4 +; X86-SSE41-NEXT: por %xmm2, %xmm4 +; X86-SSE41-NEXT: ptest %xmm4, %xmm4 +; X86-SSE41-NEXT: setne %al +; X86-SSE41-NEXT: retl +; +; X64-SSE2-LABEL: length64_eq: +; X64-SSE2: # %bb.0: +; X64-SSE2-NEXT: movdqu (%rdi), %xmm0 +; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1 +; X64-SSE2-NEXT: movdqu 32(%rdi), %xmm2 +; X64-SSE2-NEXT: movdqu 48(%rdi), %xmm3 +; X64-SSE2-NEXT: movdqu (%rsi), %xmm4 +; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm4 +; X64-SSE2-NEXT: movdqu 16(%rsi), %xmm0 +; X64-SSE2-NEXT: pcmpeqb %xmm1, %xmm0 +; X64-SSE2-NEXT: movdqu 32(%rsi), %xmm1 +; X64-SSE2-NEXT: pcmpeqb %xmm2, %xmm1 +; X64-SSE2-NEXT: movdqu 48(%rsi), %xmm2 +; X64-SSE2-NEXT: pcmpeqb %xmm3, %xmm2 +; X64-SSE2-NEXT: pand %xmm1, %xmm2 +; X64-SSE2-NEXT: pand %xmm0, %xmm2 +; X64-SSE2-NEXT: pand %xmm4, %xmm2 +; X64-SSE2-NEXT: pmovmskb %xmm2, %eax +; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF +; X64-SSE2-NEXT: setne %al +; X64-SSE2-NEXT: retq +; +; X64-SSE41-LABEL: length64_eq: +; X64-SSE41: # %bb.0: +; X64-SSE41-NEXT: movdqu (%rdi), %xmm0 +; X64-SSE41-NEXT: movdqu 16(%rdi), %xmm1 +; X64-SSE41-NEXT: movdqu 32(%rdi), %xmm2 +; X64-SSE41-NEXT: movdqu 48(%rdi), %xmm3 +; X64-SSE41-NEXT: movdqu (%rsi), %xmm4 +; X64-SSE41-NEXT: pxor %xmm0, %xmm4 +; X64-SSE41-NEXT: movdqu 16(%rsi), %xmm0 +; X64-SSE41-NEXT: pxor %xmm1, %xmm0 +; X64-SSE41-NEXT: movdqu 32(%rsi), %xmm1 +; X64-SSE41-NEXT: pxor %xmm2, %xmm1 +; X64-SSE41-NEXT: movdqu 48(%rsi), %xmm2 +; X64-SSE41-NEXT: pxor %xmm3, %xmm2 +; X64-SSE41-NEXT: por %xmm1, %xmm2 +; X64-SSE41-NEXT: por %xmm0, %xmm2 +; X64-SSE41-NEXT: por %xmm4, %xmm2 +; X64-SSE41-NEXT: ptest %xmm2, %xmm2 +; X64-SSE41-NEXT: setne %al +; X64-SSE41-NEXT: retq +; +; X64-AVX1-LABEL: length64_eq: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: vmovdqu (%rdi), %xmm0 +; X64-AVX1-NEXT: vmovdqu 16(%rdi), %xmm1 +; X64-AVX1-NEXT: vmovdqu 32(%rdi), %xmm2 +; X64-AVX1-NEXT: vmovdqu 48(%rdi), %xmm3 +; X64-AVX1-NEXT: vpxor 48(%rsi), %xmm3, %xmm3 +; X64-AVX1-NEXT: vpxor 32(%rsi), %xmm2, %xmm2 +; X64-AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2 +; X64-AVX1-NEXT: vpxor 16(%rsi), %xmm1, %xmm1 +; X64-AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1 +; X64-AVX1-NEXT: vpxor (%rsi), %xmm0, %xmm0 +; X64-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 +; X64-AVX1-NEXT: vptest %xmm0, %xmm0 +; X64-AVX1-NEXT: setne %al +; X64-AVX1-NEXT: retq +; +; X64-AVX2-LABEL: length64_eq: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-AVX2-NEXT: vpxor 32(%rsi), %ymm1, %ymm1 +; X64-AVX2-NEXT: vpxor (%rsi), %ymm0, %ymm0 +; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 +; X64-AVX2-NEXT: vptest %ymm0, %ymm0 +; X64-AVX2-NEXT: setne %al +; X64-AVX2-NEXT: vzeroupper +; X64-AVX2-NEXT: retq +; +; X64-AVX512BW-LABEL: length64_eq: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512BW-NEXT: vpcmpneqb (%rsi), %zmm0, %k0 +; X64-AVX512BW-NEXT: kortestq %k0, %k0 +; X64-AVX512BW-NEXT: setne %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq +; +; X64-AVX512F-LABEL: length64_eq: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512F-NEXT: vpcmpneqd (%rsi), %zmm0, %k0 +; X64-AVX512F-NEXT: kortestw %k0, %k0 +; 
X64-AVX512F-NEXT: setne %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-MIC-AVX2-LABEL: length64_eq: +; X64-MIC-AVX2: # %bb.0: +; X64-MIC-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-MIC-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-MIC-AVX2-NEXT: vmovdqu (%rsi), %ymm2 +; X64-MIC-AVX2-NEXT: vmovdqu 32(%rsi), %ymm3 +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm3, %zmm1, %k0 +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm2, %zmm0, %k1 +; X64-MIC-AVX2-NEXT: kortestw %k0, %k1 +; X64-MIC-AVX2-NEXT: setne %al +; X64-MIC-AVX2-NEXT: vzeroupper +; X64-MIC-AVX2-NEXT: retq +; +; X64-MIC-AVX512F-LABEL: length64_eq: +; X64-MIC-AVX512F: # %bb.0: +; X64-MIC-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd (%rsi), %zmm0, %k0 +; X64-MIC-AVX512F-NEXT: kortestw %k0, %k0 +; X64-MIC-AVX512F-NEXT: setne %al +; X64-MIC-AVX512F-NEXT: vzeroupper +; X64-MIC-AVX512F-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 64) nounwind + %cmp = icmp ne i32 %call, 0 + ret i1 %cmp +} + +define i1 @length64_lt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length64_lt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $64 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: shrl $31, %eax +; X86-NEXT: # kill: def $al killed $al killed $eax +; X86-NEXT: retl +; +; X64-LABEL: length64_lt: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $64, %edx +; X64-NEXT: callq memcmp +; X64-NEXT: shrl $31, %eax +; X64-NEXT: # kill: def $al killed $al killed $eax +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 64) nounwind + %cmp = icmp slt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length64_gt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length64_gt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $64 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setg %al +; X86-NEXT: retl +; +; X64-LABEL: length64_gt: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $64, %edx +; X64-NEXT: callq memcmp +; X64-NEXT: testl %eax, %eax +; X64-NEXT: setg %al +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 64) nounwind + %cmp = icmp sgt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length64_eq_const(i8* %X) nounwind { +; X86-NOSSE-LABEL: length64_eq_const: +; X86-NOSSE: # %bb.0: +; X86-NOSSE-NEXT: pushl $0 +; X86-NOSSE-NEXT: pushl $64 +; X86-NOSSE-NEXT: pushl $.L.str +; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NOSSE-NEXT: calll memcmp +; X86-NOSSE-NEXT: addl $16, %esp +; X86-NOSSE-NEXT: testl %eax, %eax +; X86-NOSSE-NEXT: sete %al +; X86-NOSSE-NEXT: retl +; +; X86-SSE1-LABEL: length64_eq_const: +; X86-SSE1: # %bb.0: +; X86-SSE1-NEXT: pushl $0 +; X86-SSE1-NEXT: pushl $64 +; X86-SSE1-NEXT: pushl $.L.str +; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp) +; X86-SSE1-NEXT: calll memcmp +; X86-SSE1-NEXT: addl $16, %esp +; X86-SSE1-NEXT: testl %eax, %eax +; X86-SSE1-NEXT: sete %al +; X86-SSE1-NEXT: retl +; +; X86-SSE2-LABEL: length64_eq_const: +; X86-SSE2: # %bb.0: +; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-SSE2-NEXT: movdqu (%eax), %xmm0 +; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1 +; X86-SSE2-NEXT: movdqu 32(%eax), %xmm2 +; X86-SSE2-NEXT: movdqu 48(%eax), %xmm3 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm3 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm2 +; X86-SSE2-NEXT: pand %xmm3, %xmm2 +; X86-SSE2-NEXT: pcmpeqb 
{{\.LCPI.*}}, %xmm1 +; X86-SSE2-NEXT: pand %xmm2, %xmm1 +; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0 +; X86-SSE2-NEXT: pand %xmm1, %xmm0 +; X86-SSE2-NEXT: pmovmskb %xmm0, %eax +; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF +; X86-SSE2-NEXT: sete %al +; X86-SSE2-NEXT: retl +; +; X86-SSE41-LABEL: length64_eq_const: +; X86-SSE41: # %bb.0: +; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-SSE41-NEXT: movdqu (%eax), %xmm0 +; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1 +; X86-SSE41-NEXT: movdqu 32(%eax), %xmm2 +; X86-SSE41-NEXT: movdqu 48(%eax), %xmm3 +; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm3 +; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm2 +; X86-SSE41-NEXT: por %xmm3, %xmm2 +; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1 +; X86-SSE41-NEXT: por %xmm2, %xmm1 +; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0 +; X86-SSE41-NEXT: por %xmm1, %xmm0 +; X86-SSE41-NEXT: ptest %xmm0, %xmm0 +; X86-SSE41-NEXT: sete %al +; X86-SSE41-NEXT: retl +; +; X64-SSE2-LABEL: length64_eq_const: +; X64-SSE2: # %bb.0: +; X64-SSE2-NEXT: movdqu (%rdi), %xmm0 +; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1 +; X64-SSE2-NEXT: movdqu 32(%rdi), %xmm2 +; X64-SSE2-NEXT: movdqu 48(%rdi), %xmm3 +; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm3 +; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm2 +; X64-SSE2-NEXT: pand %xmm3, %xmm2 +; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm1 +; X64-SSE2-NEXT: pand %xmm2, %xmm1 +; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0 +; X64-SSE2-NEXT: pand %xmm1, %xmm0 +; X64-SSE2-NEXT: pmovmskb %xmm0, %eax +; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF +; X64-SSE2-NEXT: sete %al +; X64-SSE2-NEXT: retq +; +; X64-SSE41-LABEL: length64_eq_const: +; X64-SSE41: # %bb.0: +; X64-SSE41-NEXT: movdqu (%rdi), %xmm0 +; X64-SSE41-NEXT: movdqu 16(%rdi), %xmm1 +; X64-SSE41-NEXT: movdqu 32(%rdi), %xmm2 +; X64-SSE41-NEXT: movdqu 48(%rdi), %xmm3 +; X64-SSE41-NEXT: pxor {{.*}}(%rip), %xmm3 +; X64-SSE41-NEXT: pxor {{.*}}(%rip), %xmm2 +; X64-SSE41-NEXT: por %xmm3, %xmm2 +; X64-SSE41-NEXT: pxor {{.*}}(%rip), %xmm1 +; X64-SSE41-NEXT: por %xmm2, %xmm1 +; X64-SSE41-NEXT: pxor {{.*}}(%rip), %xmm0 +; X64-SSE41-NEXT: por %xmm1, %xmm0 +; X64-SSE41-NEXT: ptest %xmm0, %xmm0 +; X64-SSE41-NEXT: sete %al +; X64-SSE41-NEXT: retq +; +; X64-AVX1-LABEL: length64_eq_const: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: vmovdqu (%rdi), %xmm0 +; X64-AVX1-NEXT: vmovdqu 16(%rdi), %xmm1 +; X64-AVX1-NEXT: vmovdqu 32(%rdi), %xmm2 +; X64-AVX1-NEXT: vmovdqu 48(%rdi), %xmm3 +; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm3, %xmm3 +; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm2, %xmm2 +; X64-AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2 +; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm1, %xmm1 +; X64-AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1 +; X64-AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0 +; X64-AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0 +; X64-AVX1-NEXT: vptest %xmm0, %xmm0 +; X64-AVX1-NEXT: sete %al +; X64-AVX1-NEXT: retq +; +; X64-AVX2-LABEL: length64_eq_const: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm1, %ymm1 +; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0 +; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 +; X64-AVX2-NEXT: vptest %ymm0, %ymm0 +; X64-AVX2-NEXT: sete %al +; X64-AVX2-NEXT: vzeroupper +; X64-AVX2-NEXT: retq +; +; X64-AVX512BW-LABEL: length64_eq_const: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512BW-NEXT: vpcmpneqb {{.*}}(%rip), %zmm0, %k0 +; X64-AVX512BW-NEXT: kortestq %k0, %k0 +; X64-AVX512BW-NEXT: sete %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq 
+; +; X64-AVX512F-LABEL: length64_eq_const: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512F-NEXT: vpcmpneqd {{.*}}(%rip), %zmm0, %k0 +; X64-AVX512F-NEXT: kortestw %k0, %k0 +; X64-AVX512F-NEXT: sete %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-MIC-AVX2-LABEL: length64_eq_const: +; X64-MIC-AVX2: # %bb.0: +; X64-MIC-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-MIC-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-MIC-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [892613426,959985462,858927408,926299444,825243960,892613426,959985462,858927408] +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm2, %zmm1, %k0 +; X64-MIC-AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [858927408,926299444,825243960,892613426,959985462,858927408,926299444,825243960] +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm1, %zmm0, %k1 +; X64-MIC-AVX2-NEXT: kortestw %k0, %k1 +; X64-MIC-AVX2-NEXT: sete %al +; X64-MIC-AVX2-NEXT: vzeroupper +; X64-MIC-AVX2-NEXT: retq +; +; X64-MIC-AVX512F-LABEL: length64_eq_const: +; X64-MIC-AVX512F: # %bb.0: +; X64-MIC-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd {{.*}}(%rip), %zmm0, %k0 +; X64-MIC-AVX512F-NEXT: kortestw %k0, %k0 +; X64-MIC-AVX512F-NEXT: sete %al +; X64-MIC-AVX512F-NEXT: vzeroupper +; X64-MIC-AVX512F-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 64) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i32 @length96(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length96: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $96 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: retl +; +; X64-LABEL: length96: +; X64: # %bb.0: +; X64-NEXT: movl $96, %edx +; X64-NEXT: jmp memcmp # TAILCALL + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 96) nounwind + ret i32 %m +} + +define i1 @length96_eq(i8* %x, i8* %y) nounwind { +; X86-LABEL: length96_eq: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $96 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setne %al +; X86-NEXT: retl +; +; X64-SSE-LABEL: length96_eq: +; X64-SSE: # %bb.0: +; X64-SSE-NEXT: pushq %rax +; X64-SSE-NEXT: movl $96, %edx +; X64-SSE-NEXT: callq memcmp +; X64-SSE-NEXT: testl %eax, %eax +; X64-SSE-NEXT: setne %al +; X64-SSE-NEXT: popq %rcx +; X64-SSE-NEXT: retq +; +; X64-AVX1-LABEL: length96_eq: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: pushq %rax +; X64-AVX1-NEXT: movl $96, %edx +; X64-AVX1-NEXT: callq memcmp +; X64-AVX1-NEXT: testl %eax, %eax +; X64-AVX1-NEXT: setne %al +; X64-AVX1-NEXT: popq %rcx +; X64-AVX1-NEXT: retq +; +; X64-AVX2-LABEL: length96_eq: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-AVX2-NEXT: vmovdqu 64(%rdi), %ymm2 +; X64-AVX2-NEXT: vpxor 32(%rsi), %ymm1, %ymm1 +; X64-AVX2-NEXT: vpxor (%rsi), %ymm0, %ymm0 +; X64-AVX2-NEXT: vpxor 64(%rsi), %ymm2, %ymm2 +; X64-AVX2-NEXT: vpor %ymm2, %ymm1, %ymm1 +; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 +; X64-AVX2-NEXT: vptest %ymm0, %ymm0 +; X64-AVX2-NEXT: setne %al +; X64-AVX2-NEXT: vzeroupper +; X64-AVX2-NEXT: retq +; +; X64-AVX512BW-LABEL: length96_eq: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512BW-NEXT: vmovdqu 64(%rdi), %ymm1 +; X64-AVX512BW-NEXT: vmovdqu 64(%rsi), %ymm2 +; X64-AVX512BW-NEXT: vpcmpneqb (%rsi), %zmm0, %k0 +; 
X64-AVX512BW-NEXT: vpcmpneqb %zmm2, %zmm1, %k1 +; X64-AVX512BW-NEXT: kortestq %k1, %k0 +; X64-AVX512BW-NEXT: setne %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq +; +; X64-AVX512F-LABEL: length96_eq: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512F-NEXT: vmovdqu 64(%rdi), %ymm1 +; X64-AVX512F-NEXT: vmovdqu 64(%rsi), %ymm2 +; X64-AVX512F-NEXT: vpcmpneqd (%rsi), %zmm0, %k0 +; X64-AVX512F-NEXT: vpcmpneqd %zmm2, %zmm1, %k1 +; X64-AVX512F-NEXT: kortestw %k1, %k0 +; X64-AVX512F-NEXT: setne %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-MIC-AVX2-LABEL: length96_eq: +; X64-MIC-AVX2: # %bb.0: +; X64-MIC-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-MIC-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-MIC-AVX2-NEXT: vmovdqu 64(%rdi), %ymm2 +; X64-MIC-AVX2-NEXT: vmovdqu (%rsi), %ymm3 +; X64-MIC-AVX2-NEXT: vmovdqu 32(%rsi), %ymm4 +; X64-MIC-AVX2-NEXT: vmovdqu 64(%rsi), %ymm5 +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm4, %zmm1, %k0 +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm3, %zmm0, %k1 +; X64-MIC-AVX2-NEXT: korw %k0, %k1, %k0 +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm5, %zmm2, %k1 +; X64-MIC-AVX2-NEXT: kortestw %k1, %k0 +; X64-MIC-AVX2-NEXT: setne %al +; X64-MIC-AVX2-NEXT: vzeroupper +; X64-MIC-AVX2-NEXT: retq +; +; X64-MIC-AVX512F-LABEL: length96_eq: +; X64-MIC-AVX512F: # %bb.0: +; X64-MIC-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-MIC-AVX512F-NEXT: vmovdqu 64(%rdi), %ymm1 +; X64-MIC-AVX512F-NEXT: vmovdqu 64(%rsi), %ymm2 +; X64-MIC-AVX512F-NEXT: vpcmpneqd (%rsi), %zmm0, %k0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd %zmm2, %zmm1, %k1 +; X64-MIC-AVX512F-NEXT: kortestw %k1, %k0 +; X64-MIC-AVX512F-NEXT: setne %al +; X64-MIC-AVX512F-NEXT: vzeroupper +; X64-MIC-AVX512F-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 96) nounwind + %cmp = icmp ne i32 %call, 0 + ret i1 %cmp +} + +define i1 @length96_lt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length96_lt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $96 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: shrl $31, %eax +; X86-NEXT: # kill: def $al killed $al killed $eax +; X86-NEXT: retl +; +; X64-LABEL: length96_lt: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $96, %edx +; X64-NEXT: callq memcmp +; X64-NEXT: shrl $31, %eax +; X64-NEXT: # kill: def $al killed $al killed $eax +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 96) nounwind + %cmp = icmp slt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length96_gt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length96_gt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $96 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setg %al +; X86-NEXT: retl +; +; X64-LABEL: length96_gt: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $96, %edx +; X64-NEXT: callq memcmp +; X64-NEXT: testl %eax, %eax +; X64-NEXT: setg %al +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 96) nounwind + %cmp = icmp sgt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length96_eq_const(i8* %X) nounwind { +; X86-LABEL: length96_eq_const: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $96 +; X86-NEXT: pushl $.L.str +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: sete %al 
+; X86-NEXT: retl +; +; X64-SSE-LABEL: length96_eq_const: +; X64-SSE: # %bb.0: +; X64-SSE-NEXT: pushq %rax +; X64-SSE-NEXT: movl $.L.str, %esi +; X64-SSE-NEXT: movl $96, %edx +; X64-SSE-NEXT: callq memcmp +; X64-SSE-NEXT: testl %eax, %eax +; X64-SSE-NEXT: sete %al +; X64-SSE-NEXT: popq %rcx +; X64-SSE-NEXT: retq +; +; X64-AVX1-LABEL: length96_eq_const: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: pushq %rax +; X64-AVX1-NEXT: movl $.L.str, %esi +; X64-AVX1-NEXT: movl $96, %edx +; X64-AVX1-NEXT: callq memcmp +; X64-AVX1-NEXT: testl %eax, %eax +; X64-AVX1-NEXT: sete %al +; X64-AVX1-NEXT: popq %rcx +; X64-AVX1-NEXT: retq +; +; X64-AVX2-LABEL: length96_eq_const: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-AVX2-NEXT: vmovdqu 64(%rdi), %ymm2 +; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm1, %ymm1 +; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0 +; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm2, %ymm2 +; X64-AVX2-NEXT: vpor %ymm2, %ymm1, %ymm1 +; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 +; X64-AVX2-NEXT: vptest %ymm0, %ymm0 +; X64-AVX2-NEXT: sete %al +; X64-AVX2-NEXT: vzeroupper +; X64-AVX2-NEXT: retq +; +; X64-AVX512BW-LABEL: length96_eq_const: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512BW-NEXT: vmovdqu 64(%rdi), %ymm1 +; X64-AVX512BW-NEXT: vpcmpneqb {{.*}}(%rip), %zmm0, %k0 +; X64-AVX512BW-NEXT: vpcmpneqb {{.*}}(%rip), %zmm1, %k1 +; X64-AVX512BW-NEXT: kortestq %k1, %k0 +; X64-AVX512BW-NEXT: sete %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq +; +; X64-AVX512F-LABEL: length96_eq_const: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512F-NEXT: vmovdqu 64(%rdi), %ymm1 +; X64-AVX512F-NEXT: vpcmpneqd {{.*}}(%rip), %zmm0, %k0 +; X64-AVX512F-NEXT: vpcmpneqd {{.*}}(%rip), %zmm1, %k1 +; X64-AVX512F-NEXT: kortestw %k1, %k0 +; X64-AVX512F-NEXT: sete %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-MIC-AVX2-LABEL: length96_eq_const: +; X64-MIC-AVX2: # %bb.0: +; X64-MIC-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-MIC-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-MIC-AVX2-NEXT: vmovdqu 64(%rdi), %ymm2 +; X64-MIC-AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [892613426,959985462,858927408,926299444,825243960,892613426,959985462,858927408] +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm3, %zmm1, %k0 +; X64-MIC-AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [858927408,926299444,825243960,892613426,959985462,858927408,926299444,825243960] +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm1, %zmm0, %k1 +; X64-MIC-AVX2-NEXT: korw %k0, %k1, %k0 +; X64-MIC-AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [926299444,825243960,892613426,959985462,858927408,926299444,825243960,892613426] +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm0, %zmm2, %k1 +; X64-MIC-AVX2-NEXT: kortestw %k1, %k0 +; X64-MIC-AVX2-NEXT: sete %al +; X64-MIC-AVX2-NEXT: vzeroupper +; X64-MIC-AVX2-NEXT: retq +; +; X64-MIC-AVX512F-LABEL: length96_eq_const: +; X64-MIC-AVX512F: # %bb.0: +; X64-MIC-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-MIC-AVX512F-NEXT: vmovdqu 64(%rdi), %ymm1 +; X64-MIC-AVX512F-NEXT: vpcmpneqd {{.*}}(%rip), %zmm0, %k0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd {{.*}}(%rip), %zmm1, %k1 +; X64-MIC-AVX512F-NEXT: kortestw %k1, %k0 +; X64-MIC-AVX512F-NEXT: sete %al +; X64-MIC-AVX512F-NEXT: vzeroupper +; X64-MIC-AVX512F-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 96) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i32 @length127(i8* %X, i8* %Y) nounwind { +; X86-LABEL: 
length127: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $127 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: retl +; +; X64-LABEL: length127: +; X64: # %bb.0: +; X64-NEXT: movl $127, %edx +; X64-NEXT: jmp memcmp # TAILCALL + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 127) nounwind + ret i32 %m +} + +define i1 @length127_eq(i8* %x, i8* %y) nounwind { +; X86-LABEL: length127_eq: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $127 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setne %al +; X86-NEXT: retl +; +; X64-SSE-LABEL: length127_eq: +; X64-SSE: # %bb.0: +; X64-SSE-NEXT: pushq %rax +; X64-SSE-NEXT: movl $127, %edx +; X64-SSE-NEXT: callq memcmp +; X64-SSE-NEXT: testl %eax, %eax +; X64-SSE-NEXT: setne %al +; X64-SSE-NEXT: popq %rcx +; X64-SSE-NEXT: retq +; +; X64-AVX1-LABEL: length127_eq: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: pushq %rax +; X64-AVX1-NEXT: movl $127, %edx +; X64-AVX1-NEXT: callq memcmp +; X64-AVX1-NEXT: testl %eax, %eax +; X64-AVX1-NEXT: setne %al +; X64-AVX1-NEXT: popq %rcx +; X64-AVX1-NEXT: retq +; +; X64-AVX2-LABEL: length127_eq: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-AVX2-NEXT: vmovdqu 64(%rdi), %ymm2 +; X64-AVX2-NEXT: vmovdqu 95(%rdi), %ymm3 +; X64-AVX2-NEXT: vpxor 95(%rsi), %ymm3, %ymm3 +; X64-AVX2-NEXT: vpxor 64(%rsi), %ymm2, %ymm2 +; X64-AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2 +; X64-AVX2-NEXT: vpxor 32(%rsi), %ymm1, %ymm1 +; X64-AVX2-NEXT: vpor %ymm2, %ymm1, %ymm1 +; X64-AVX2-NEXT: vpxor (%rsi), %ymm0, %ymm0 +; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 +; X64-AVX2-NEXT: vptest %ymm0, %ymm0 +; X64-AVX2-NEXT: setne %al +; X64-AVX2-NEXT: vzeroupper +; X64-AVX2-NEXT: retq +; +; X64-AVX512BW-LABEL: length127_eq: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512BW-NEXT: vmovdqu64 63(%rdi), %zmm1 +; X64-AVX512BW-NEXT: vpcmpneqb 63(%rsi), %zmm1, %k0 +; X64-AVX512BW-NEXT: vpcmpneqb (%rsi), %zmm0, %k1 +; X64-AVX512BW-NEXT: kortestq %k0, %k1 +; X64-AVX512BW-NEXT: setne %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq +; +; X64-AVX512F-LABEL: length127_eq: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512F-NEXT: vmovdqu64 63(%rdi), %zmm1 +; X64-AVX512F-NEXT: vpcmpneqd 63(%rsi), %zmm1, %k0 +; X64-AVX512F-NEXT: vpcmpneqd (%rsi), %zmm0, %k1 +; X64-AVX512F-NEXT: kortestw %k0, %k1 +; X64-AVX512F-NEXT: setne %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-MIC-AVX2-LABEL: length127_eq: +; X64-MIC-AVX2: # %bb.0: +; X64-MIC-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-MIC-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-MIC-AVX2-NEXT: vmovdqu 64(%rdi), %ymm2 +; X64-MIC-AVX2-NEXT: vmovdqu 95(%rdi), %ymm3 +; X64-MIC-AVX2-NEXT: vmovdqu (%rsi), %ymm4 +; X64-MIC-AVX2-NEXT: vmovdqu 32(%rsi), %ymm5 +; X64-MIC-AVX2-NEXT: vmovdqu 64(%rsi), %ymm6 +; X64-MIC-AVX2-NEXT: vmovdqu 95(%rsi), %ymm7 +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm7, %zmm3, %k0 +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm6, %zmm2, %k1 +; X64-MIC-AVX2-NEXT: korw %k0, %k1, %k0 +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm5, %zmm1, %k1 +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm4, %zmm0, %k2 +; X64-MIC-AVX2-NEXT: korw %k1, %k2, %k1 +; X64-MIC-AVX2-NEXT: kortestw %k0, %k1 +; X64-MIC-AVX2-NEXT: setne %al +; X64-MIC-AVX2-NEXT: vzeroupper +; X64-MIC-AVX2-NEXT: retq 
+; +; X64-MIC-AVX512F-LABEL: length127_eq: +; X64-MIC-AVX512F: # %bb.0: +; X64-MIC-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-MIC-AVX512F-NEXT: vmovdqu64 63(%rdi), %zmm1 +; X64-MIC-AVX512F-NEXT: vpcmpneqd 63(%rsi), %zmm1, %k0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd (%rsi), %zmm0, %k1 +; X64-MIC-AVX512F-NEXT: kortestw %k0, %k1 +; X64-MIC-AVX512F-NEXT: setne %al +; X64-MIC-AVX512F-NEXT: vzeroupper +; X64-MIC-AVX512F-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 127) nounwind + %cmp = icmp ne i32 %call, 0 + ret i1 %cmp +} + +define i1 @length127_lt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length127_lt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $127 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: shrl $31, %eax +; X86-NEXT: # kill: def $al killed $al killed $eax +; X86-NEXT: retl +; +; X64-LABEL: length127_lt: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $127, %edx +; X64-NEXT: callq memcmp +; X64-NEXT: shrl $31, %eax +; X64-NEXT: # kill: def $al killed $al killed $eax +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 127) nounwind + %cmp = icmp slt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length127_gt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length127_gt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $127 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setg %al +; X86-NEXT: retl +; +; X64-LABEL: length127_gt: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $127, %edx +; X64-NEXT: callq memcmp +; X64-NEXT: testl %eax, %eax +; X64-NEXT: setg %al +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 127) nounwind + %cmp = icmp sgt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length127_eq_const(i8* %X) nounwind { +; X86-LABEL: length127_eq_const: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $127 +; X86-NEXT: pushl $.L.str +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: sete %al +; X86-NEXT: retl +; +; X64-SSE-LABEL: length127_eq_const: +; X64-SSE: # %bb.0: +; X64-SSE-NEXT: pushq %rax +; X64-SSE-NEXT: movl $.L.str, %esi +; X64-SSE-NEXT: movl $127, %edx +; X64-SSE-NEXT: callq memcmp +; X64-SSE-NEXT: testl %eax, %eax +; X64-SSE-NEXT: sete %al +; X64-SSE-NEXT: popq %rcx +; X64-SSE-NEXT: retq +; +; X64-AVX1-LABEL: length127_eq_const: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: pushq %rax +; X64-AVX1-NEXT: movl $.L.str, %esi +; X64-AVX1-NEXT: movl $127, %edx +; X64-AVX1-NEXT: callq memcmp +; X64-AVX1-NEXT: testl %eax, %eax +; X64-AVX1-NEXT: sete %al +; X64-AVX1-NEXT: popq %rcx +; X64-AVX1-NEXT: retq +; +; X64-AVX2-LABEL: length127_eq_const: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-AVX2-NEXT: vmovdqu 64(%rdi), %ymm2 +; X64-AVX2-NEXT: vmovdqu 95(%rdi), %ymm3 +; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm3, %ymm3 +; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm2, %ymm2 +; X64-AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2 +; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm1, %ymm1 +; X64-AVX2-NEXT: vpor %ymm2, %ymm1, %ymm1 +; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0 +; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 +; X64-AVX2-NEXT: vptest %ymm0, %ymm0 +; X64-AVX2-NEXT: sete %al +; X64-AVX2-NEXT: vzeroupper +; 
X64-AVX2-NEXT: retq +; +; X64-AVX512BW-LABEL: length127_eq_const: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512BW-NEXT: vmovdqu64 63(%rdi), %zmm1 +; X64-AVX512BW-NEXT: vpcmpneqb .L.str+{{.*}}(%rip), %zmm1, %k0 +; X64-AVX512BW-NEXT: vpcmpneqb {{.*}}(%rip), %zmm0, %k1 +; X64-AVX512BW-NEXT: kortestq %k0, %k1 +; X64-AVX512BW-NEXT: sete %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq +; +; X64-AVX512F-LABEL: length127_eq_const: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512F-NEXT: vmovdqu64 63(%rdi), %zmm1 +; X64-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm1, %k0 +; X64-AVX512F-NEXT: vpcmpneqd {{.*}}(%rip), %zmm0, %k1 +; X64-AVX512F-NEXT: kortestw %k0, %k1 +; X64-AVX512F-NEXT: sete %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-MIC-AVX2-LABEL: length127_eq_const: +; X64-MIC-AVX2: # %bb.0: +; X64-MIC-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-MIC-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-MIC-AVX2-NEXT: vmovdqu 64(%rdi), %ymm2 +; X64-MIC-AVX2-NEXT: vmovdqu 95(%rdi), %ymm3 +; X64-MIC-AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [943142453,842084409,909456435,809056311,875770417,943142453,842084409,909456435] +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm4, %zmm3, %k0 +; X64-MIC-AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [926299444,825243960,892613426,959985462,858927408,926299444,825243960,892613426] +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm3, %zmm2, %k1 +; X64-MIC-AVX2-NEXT: korw %k0, %k1, %k0 +; X64-MIC-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [892613426,959985462,858927408,926299444,825243960,892613426,959985462,858927408] +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm2, %zmm1, %k1 +; X64-MIC-AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [858927408,926299444,825243960,892613426,959985462,858927408,926299444,825243960] +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm1, %zmm0, %k2 +; X64-MIC-AVX2-NEXT: korw %k1, %k2, %k1 +; X64-MIC-AVX2-NEXT: kortestw %k0, %k1 +; X64-MIC-AVX2-NEXT: sete %al +; X64-MIC-AVX2-NEXT: vzeroupper +; X64-MIC-AVX2-NEXT: retq +; +; X64-MIC-AVX512F-LABEL: length127_eq_const: +; X64-MIC-AVX512F: # %bb.0: +; X64-MIC-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-MIC-AVX512F-NEXT: vmovdqu64 63(%rdi), %zmm1 +; X64-MIC-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm1, %k0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd {{.*}}(%rip), %zmm0, %k1 +; X64-MIC-AVX512F-NEXT: kortestw %k0, %k1 +; X64-MIC-AVX512F-NEXT: sete %al +; X64-MIC-AVX512F-NEXT: vzeroupper +; X64-MIC-AVX512F-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 127) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i32 @length128(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length128: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $128 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: retl +; +; X64-LABEL: length128: +; X64: # %bb.0: +; X64-NEXT: movl $128, %edx +; X64-NEXT: jmp memcmp # TAILCALL + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 128) nounwind + ret i32 %m +} + +define i1 @length128_eq(i8* %x, i8* %y) nounwind { +; X86-LABEL: length128_eq: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $128 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setne %al +; X86-NEXT: retl +; +; X64-SSE-LABEL: length128_eq: +; X64-SSE: # %bb.0: +; X64-SSE-NEXT: pushq %rax +; 
X64-SSE-NEXT: movl $128, %edx +; X64-SSE-NEXT: callq memcmp +; X64-SSE-NEXT: testl %eax, %eax +; X64-SSE-NEXT: setne %al +; X64-SSE-NEXT: popq %rcx +; X64-SSE-NEXT: retq +; +; X64-AVX1-LABEL: length128_eq: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: pushq %rax +; X64-AVX1-NEXT: movl $128, %edx +; X64-AVX1-NEXT: callq memcmp +; X64-AVX1-NEXT: testl %eax, %eax +; X64-AVX1-NEXT: setne %al +; X64-AVX1-NEXT: popq %rcx +; X64-AVX1-NEXT: retq +; +; X64-AVX2-LABEL: length128_eq: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-AVX2-NEXT: vmovdqu 64(%rdi), %ymm2 +; X64-AVX2-NEXT: vmovdqu 96(%rdi), %ymm3 +; X64-AVX2-NEXT: vpxor 96(%rsi), %ymm3, %ymm3 +; X64-AVX2-NEXT: vpxor 64(%rsi), %ymm2, %ymm2 +; X64-AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2 +; X64-AVX2-NEXT: vpxor 32(%rsi), %ymm1, %ymm1 +; X64-AVX2-NEXT: vpor %ymm2, %ymm1, %ymm1 +; X64-AVX2-NEXT: vpxor (%rsi), %ymm0, %ymm0 +; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 +; X64-AVX2-NEXT: vptest %ymm0, %ymm0 +; X64-AVX2-NEXT: setne %al +; X64-AVX2-NEXT: vzeroupper +; X64-AVX2-NEXT: retq +; +; X64-AVX512BW-LABEL: length128_eq: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512BW-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-AVX512BW-NEXT: vpcmpneqb 64(%rsi), %zmm1, %k0 +; X64-AVX512BW-NEXT: vpcmpneqb (%rsi), %zmm0, %k1 +; X64-AVX512BW-NEXT: kortestq %k0, %k1 +; X64-AVX512BW-NEXT: setne %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq +; +; X64-AVX512F-LABEL: length128_eq: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-AVX512F-NEXT: vpcmpneqd 64(%rsi), %zmm1, %k0 +; X64-AVX512F-NEXT: vpcmpneqd (%rsi), %zmm0, %k1 +; X64-AVX512F-NEXT: kortestw %k0, %k1 +; X64-AVX512F-NEXT: setne %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-MIC-AVX2-LABEL: length128_eq: +; X64-MIC-AVX2: # %bb.0: +; X64-MIC-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-MIC-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-MIC-AVX2-NEXT: vmovdqu 64(%rdi), %ymm2 +; X64-MIC-AVX2-NEXT: vmovdqu 96(%rdi), %ymm3 +; X64-MIC-AVX2-NEXT: vmovdqu (%rsi), %ymm4 +; X64-MIC-AVX2-NEXT: vmovdqu 32(%rsi), %ymm5 +; X64-MIC-AVX2-NEXT: vmovdqu 64(%rsi), %ymm6 +; X64-MIC-AVX2-NEXT: vmovdqu 96(%rsi), %ymm7 +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm7, %zmm3, %k0 +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm6, %zmm2, %k1 +; X64-MIC-AVX2-NEXT: korw %k0, %k1, %k0 +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm5, %zmm1, %k1 +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm4, %zmm0, %k2 +; X64-MIC-AVX2-NEXT: korw %k1, %k2, %k1 +; X64-MIC-AVX2-NEXT: kortestw %k0, %k1 +; X64-MIC-AVX2-NEXT: setne %al +; X64-MIC-AVX2-NEXT: vzeroupper +; X64-MIC-AVX2-NEXT: retq +; +; X64-MIC-AVX512F-LABEL: length128_eq: +; X64-MIC-AVX512F: # %bb.0: +; X64-MIC-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-MIC-AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-MIC-AVX512F-NEXT: vpcmpneqd 64(%rsi), %zmm1, %k0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd (%rsi), %zmm0, %k1 +; X64-MIC-AVX512F-NEXT: kortestw %k0, %k1 +; X64-MIC-AVX512F-NEXT: setne %al +; X64-MIC-AVX512F-NEXT: vzeroupper +; X64-MIC-AVX512F-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 128) nounwind + %cmp = icmp ne i32 %call, 0 + ret i1 %cmp +} + +define i1 @length128_lt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length128_lt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $128 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: shrl $31, 
%eax +; X86-NEXT: # kill: def $al killed $al killed $eax +; X86-NEXT: retl +; +; X64-LABEL: length128_lt: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $128, %edx +; X64-NEXT: callq memcmp +; X64-NEXT: shrl $31, %eax +; X64-NEXT: # kill: def $al killed $al killed $eax +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 128) nounwind + %cmp = icmp slt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length128_gt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length128_gt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $128 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setg %al +; X86-NEXT: retl +; +; X64-LABEL: length128_gt: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $128, %edx +; X64-NEXT: callq memcmp +; X64-NEXT: testl %eax, %eax +; X64-NEXT: setg %al +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 128) nounwind + %cmp = icmp sgt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length128_eq_const(i8* %X) nounwind { +; X86-LABEL: length128_eq_const: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $128 +; X86-NEXT: pushl $.L.str +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: sete %al +; X86-NEXT: retl +; +; X64-SSE-LABEL: length128_eq_const: +; X64-SSE: # %bb.0: +; X64-SSE-NEXT: pushq %rax +; X64-SSE-NEXT: movl $.L.str, %esi +; X64-SSE-NEXT: movl $128, %edx +; X64-SSE-NEXT: callq memcmp +; X64-SSE-NEXT: testl %eax, %eax +; X64-SSE-NEXT: sete %al +; X64-SSE-NEXT: popq %rcx +; X64-SSE-NEXT: retq +; +; X64-AVX1-LABEL: length128_eq_const: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: pushq %rax +; X64-AVX1-NEXT: movl $.L.str, %esi +; X64-AVX1-NEXT: movl $128, %edx +; X64-AVX1-NEXT: callq memcmp +; X64-AVX1-NEXT: testl %eax, %eax +; X64-AVX1-NEXT: sete %al +; X64-AVX1-NEXT: popq %rcx +; X64-AVX1-NEXT: retq +; +; X64-AVX2-LABEL: length128_eq_const: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-AVX2-NEXT: vmovdqu 64(%rdi), %ymm2 +; X64-AVX2-NEXT: vmovdqu 96(%rdi), %ymm3 +; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm3, %ymm3 +; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm2, %ymm2 +; X64-AVX2-NEXT: vpor %ymm3, %ymm2, %ymm2 +; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm1, %ymm1 +; X64-AVX2-NEXT: vpor %ymm2, %ymm1, %ymm1 +; X64-AVX2-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0 +; X64-AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 +; X64-AVX2-NEXT: vptest %ymm0, %ymm0 +; X64-AVX2-NEXT: sete %al +; X64-AVX2-NEXT: vzeroupper +; X64-AVX2-NEXT: retq +; +; X64-AVX512BW-LABEL: length128_eq_const: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512BW-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-AVX512BW-NEXT: vpcmpneqb .L.str+{{.*}}(%rip), %zmm1, %k0 +; X64-AVX512BW-NEXT: vpcmpneqb {{.*}}(%rip), %zmm0, %k1 +; X64-AVX512BW-NEXT: kortestq %k0, %k1 +; X64-AVX512BW-NEXT: sete %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq +; +; X64-AVX512F-LABEL: length128_eq_const: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm1, %k0 +; X64-AVX512F-NEXT: vpcmpneqd {{.*}}(%rip), %zmm0, %k1 +; X64-AVX512F-NEXT: kortestw %k0, %k1 +; X64-AVX512F-NEXT: sete %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: 
retq +; +; X64-MIC-AVX2-LABEL: length128_eq_const: +; X64-MIC-AVX2: # %bb.0: +; X64-MIC-AVX2-NEXT: vmovdqu (%rdi), %ymm0 +; X64-MIC-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1 +; X64-MIC-AVX2-NEXT: vmovdqu 64(%rdi), %ymm2 +; X64-MIC-AVX2-NEXT: vmovdqu 96(%rdi), %ymm3 +; X64-MIC-AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [959985462,858927408,926299444,825243960,892613426,959985462,858927408,926299444] +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm4, %zmm3, %k0 +; X64-MIC-AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [926299444,825243960,892613426,959985462,858927408,926299444,825243960,892613426] +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm3, %zmm2, %k1 +; X64-MIC-AVX2-NEXT: korw %k0, %k1, %k0 +; X64-MIC-AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [892613426,959985462,858927408,926299444,825243960,892613426,959985462,858927408] +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm2, %zmm1, %k1 +; X64-MIC-AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [858927408,926299444,825243960,892613426,959985462,858927408,926299444,825243960] +; X64-MIC-AVX2-NEXT: vpcmpneqd %zmm1, %zmm0, %k2 +; X64-MIC-AVX2-NEXT: korw %k1, %k2, %k1 +; X64-MIC-AVX2-NEXT: kortestw %k0, %k1 +; X64-MIC-AVX2-NEXT: sete %al +; X64-MIC-AVX2-NEXT: vzeroupper +; X64-MIC-AVX2-NEXT: retq +; +; X64-MIC-AVX512F-LABEL: length128_eq_const: +; X64-MIC-AVX512F: # %bb.0: +; X64-MIC-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-MIC-AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-MIC-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm1, %k0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd {{.*}}(%rip), %zmm0, %k1 +; X64-MIC-AVX512F-NEXT: kortestw %k0, %k1 +; X64-MIC-AVX512F-NEXT: sete %al +; X64-MIC-AVX512F-NEXT: vzeroupper +; X64-MIC-AVX512F-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 128) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i32 @length192(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length192: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $192 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: retl +; +; X64-LABEL: length192: +; X64: # %bb.0: +; X64-NEXT: movl $192, %edx +; X64-NEXT: jmp memcmp # TAILCALL + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 192) nounwind + ret i32 %m +} + +define i1 @length192_eq(i8* %x, i8* %y) nounwind { +; X86-LABEL: length192_eq: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $192 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setne %al +; X86-NEXT: retl +; +; X64-SSE-LABEL: length192_eq: +; X64-SSE: # %bb.0: +; X64-SSE-NEXT: pushq %rax +; X64-SSE-NEXT: movl $192, %edx +; X64-SSE-NEXT: callq memcmp +; X64-SSE-NEXT: testl %eax, %eax +; X64-SSE-NEXT: setne %al +; X64-SSE-NEXT: popq %rcx +; X64-SSE-NEXT: retq +; +; X64-AVX1-LABEL: length192_eq: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: pushq %rax +; X64-AVX1-NEXT: movl $192, %edx +; X64-AVX1-NEXT: callq memcmp +; X64-AVX1-NEXT: testl %eax, %eax +; X64-AVX1-NEXT: setne %al +; X64-AVX1-NEXT: popq %rcx +; X64-AVX1-NEXT: retq +; +; X64-AVX2-LABEL: length192_eq: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: pushq %rax +; X64-AVX2-NEXT: movl $192, %edx +; X64-AVX2-NEXT: callq memcmp +; X64-AVX2-NEXT: testl %eax, %eax +; X64-AVX2-NEXT: setne %al +; X64-AVX2-NEXT: popq %rcx +; X64-AVX2-NEXT: retq +; +; X64-AVX512BW-LABEL: length192_eq: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512BW-NEXT: vmovdqu64 
64(%rdi), %zmm1 +; X64-AVX512BW-NEXT: vmovdqu64 128(%rdi), %zmm2 +; X64-AVX512BW-NEXT: vpcmpneqb 64(%rsi), %zmm1, %k0 +; X64-AVX512BW-NEXT: vpcmpneqb (%rsi), %zmm0, %k1 +; X64-AVX512BW-NEXT: korq %k0, %k1, %k0 +; X64-AVX512BW-NEXT: vpcmpneqb 128(%rsi), %zmm2, %k1 +; X64-AVX512BW-NEXT: kortestq %k1, %k0 +; X64-AVX512BW-NEXT: setne %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq +; +; X64-AVX512F-LABEL: length192_eq: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-AVX512F-NEXT: vmovdqu64 128(%rdi), %zmm2 +; X64-AVX512F-NEXT: vpcmpneqd 64(%rsi), %zmm1, %k0 +; X64-AVX512F-NEXT: vpcmpneqd (%rsi), %zmm0, %k1 +; X64-AVX512F-NEXT: korw %k0, %k1, %k0 +; X64-AVX512F-NEXT: vpcmpneqd 128(%rsi), %zmm2, %k1 +; X64-AVX512F-NEXT: kortestw %k1, %k0 +; X64-AVX512F-NEXT: setne %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-MIC-AVX2-LABEL: length192_eq: +; X64-MIC-AVX2: # %bb.0: +; X64-MIC-AVX2-NEXT: pushq %rax +; X64-MIC-AVX2-NEXT: movl $192, %edx +; X64-MIC-AVX2-NEXT: callq memcmp +; X64-MIC-AVX2-NEXT: testl %eax, %eax +; X64-MIC-AVX2-NEXT: setne %al +; X64-MIC-AVX2-NEXT: popq %rcx +; X64-MIC-AVX2-NEXT: retq +; +; X64-MIC-AVX512F-LABEL: length192_eq: +; X64-MIC-AVX512F: # %bb.0: +; X64-MIC-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-MIC-AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-MIC-AVX512F-NEXT: vmovdqu64 128(%rdi), %zmm2 +; X64-MIC-AVX512F-NEXT: vpcmpneqd 64(%rsi), %zmm1, %k0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd (%rsi), %zmm0, %k1 +; X64-MIC-AVX512F-NEXT: korw %k0, %k1, %k0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd 128(%rsi), %zmm2, %k1 +; X64-MIC-AVX512F-NEXT: kortestw %k1, %k0 +; X64-MIC-AVX512F-NEXT: setne %al +; X64-MIC-AVX512F-NEXT: vzeroupper +; X64-MIC-AVX512F-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 192) nounwind + %cmp = icmp ne i32 %call, 0 + ret i1 %cmp +} + +define i1 @length192_lt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length192_lt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $192 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: shrl $31, %eax +; X86-NEXT: # kill: def $al killed $al killed $eax +; X86-NEXT: retl +; +; X64-LABEL: length192_lt: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $192, %edx +; X64-NEXT: callq memcmp +; X64-NEXT: shrl $31, %eax +; X64-NEXT: # kill: def $al killed $al killed $eax +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 192) nounwind + %cmp = icmp slt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length192_gt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length192_gt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $192 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setg %al +; X86-NEXT: retl +; +; X64-LABEL: length192_gt: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $192, %edx +; X64-NEXT: callq memcmp +; X64-NEXT: testl %eax, %eax +; X64-NEXT: setg %al +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 192) nounwind + %cmp = icmp sgt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length192_eq_const(i8* %X) nounwind { +; X86-LABEL: length192_eq_const: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $192 +; X86-NEXT: pushl $.L.str +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll 
memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: sete %al +; X86-NEXT: retl +; +; X64-SSE-LABEL: length192_eq_const: +; X64-SSE: # %bb.0: +; X64-SSE-NEXT: pushq %rax +; X64-SSE-NEXT: movl $.L.str, %esi +; X64-SSE-NEXT: movl $192, %edx +; X64-SSE-NEXT: callq memcmp +; X64-SSE-NEXT: testl %eax, %eax +; X64-SSE-NEXT: sete %al +; X64-SSE-NEXT: popq %rcx +; X64-SSE-NEXT: retq +; +; X64-AVX1-LABEL: length192_eq_const: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: pushq %rax +; X64-AVX1-NEXT: movl $.L.str, %esi +; X64-AVX1-NEXT: movl $192, %edx +; X64-AVX1-NEXT: callq memcmp +; X64-AVX1-NEXT: testl %eax, %eax +; X64-AVX1-NEXT: sete %al +; X64-AVX1-NEXT: popq %rcx +; X64-AVX1-NEXT: retq +; +; X64-AVX2-LABEL: length192_eq_const: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: pushq %rax +; X64-AVX2-NEXT: movl $.L.str, %esi +; X64-AVX2-NEXT: movl $192, %edx +; X64-AVX2-NEXT: callq memcmp +; X64-AVX2-NEXT: testl %eax, %eax +; X64-AVX2-NEXT: sete %al +; X64-AVX2-NEXT: popq %rcx +; X64-AVX2-NEXT: retq +; +; X64-AVX512BW-LABEL: length192_eq_const: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512BW-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-AVX512BW-NEXT: vmovdqu64 128(%rdi), %zmm2 +; X64-AVX512BW-NEXT: vpcmpneqb .L.str+{{.*}}(%rip), %zmm1, %k0 +; X64-AVX512BW-NEXT: vpcmpneqb {{.*}}(%rip), %zmm0, %k1 +; X64-AVX512BW-NEXT: korq %k0, %k1, %k0 +; X64-AVX512BW-NEXT: vpcmpneqb .L.str+{{.*}}(%rip), %zmm2, %k1 +; X64-AVX512BW-NEXT: kortestq %k1, %k0 +; X64-AVX512BW-NEXT: sete %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq +; +; X64-AVX512F-LABEL: length192_eq_const: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-AVX512F-NEXT: vmovdqu64 128(%rdi), %zmm2 +; X64-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm1, %k0 +; X64-AVX512F-NEXT: vpcmpneqd {{.*}}(%rip), %zmm0, %k1 +; X64-AVX512F-NEXT: korw %k0, %k1, %k0 +; X64-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm2, %k1 +; X64-AVX512F-NEXT: kortestw %k1, %k0 +; X64-AVX512F-NEXT: sete %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-MIC-AVX2-LABEL: length192_eq_const: +; X64-MIC-AVX2: # %bb.0: +; X64-MIC-AVX2-NEXT: pushq %rax +; X64-MIC-AVX2-NEXT: movl $.L.str, %esi +; X64-MIC-AVX2-NEXT: movl $192, %edx +; X64-MIC-AVX2-NEXT: callq memcmp +; X64-MIC-AVX2-NEXT: testl %eax, %eax +; X64-MIC-AVX2-NEXT: sete %al +; X64-MIC-AVX2-NEXT: popq %rcx +; X64-MIC-AVX2-NEXT: retq +; +; X64-MIC-AVX512F-LABEL: length192_eq_const: +; X64-MIC-AVX512F: # %bb.0: +; X64-MIC-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-MIC-AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-MIC-AVX512F-NEXT: vmovdqu64 128(%rdi), %zmm2 +; X64-MIC-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm1, %k0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd {{.*}}(%rip), %zmm0, %k1 +; X64-MIC-AVX512F-NEXT: korw %k0, %k1, %k0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm2, %k1 +; X64-MIC-AVX512F-NEXT: kortestw %k1, %k0 +; X64-MIC-AVX512F-NEXT: sete %al +; X64-MIC-AVX512F-NEXT: vzeroupper +; X64-MIC-AVX512F-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 192) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i32 @length255(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length255: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $255 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; 
X86-NEXT: retl +; +; X64-LABEL: length255: +; X64: # %bb.0: +; X64-NEXT: movl $255, %edx +; X64-NEXT: jmp memcmp # TAILCALL + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 255) nounwind + ret i32 %m +} + +define i1 @length255_eq(i8* %x, i8* %y) nounwind { +; X86-LABEL: length255_eq: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $255 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setne %al +; X86-NEXT: retl +; +; X64-SSE-LABEL: length255_eq: +; X64-SSE: # %bb.0: +; X64-SSE-NEXT: pushq %rax +; X64-SSE-NEXT: movl $255, %edx +; X64-SSE-NEXT: callq memcmp +; X64-SSE-NEXT: testl %eax, %eax +; X64-SSE-NEXT: setne %al +; X64-SSE-NEXT: popq %rcx +; X64-SSE-NEXT: retq +; +; X64-AVX1-LABEL: length255_eq: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: pushq %rax +; X64-AVX1-NEXT: movl $255, %edx +; X64-AVX1-NEXT: callq memcmp +; X64-AVX1-NEXT: testl %eax, %eax +; X64-AVX1-NEXT: setne %al +; X64-AVX1-NEXT: popq %rcx +; X64-AVX1-NEXT: retq +; +; X64-AVX2-LABEL: length255_eq: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: pushq %rax +; X64-AVX2-NEXT: movl $255, %edx +; X64-AVX2-NEXT: callq memcmp +; X64-AVX2-NEXT: testl %eax, %eax +; X64-AVX2-NEXT: setne %al +; X64-AVX2-NEXT: popq %rcx +; X64-AVX2-NEXT: retq +; +; X64-AVX512BW-LABEL: length255_eq: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512BW-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-AVX512BW-NEXT: vmovdqu64 128(%rdi), %zmm2 +; X64-AVX512BW-NEXT: vmovdqu64 191(%rdi), %zmm3 +; X64-AVX512BW-NEXT: vpcmpneqb 191(%rsi), %zmm3, %k0 +; X64-AVX512BW-NEXT: vpcmpneqb 128(%rsi), %zmm2, %k1 +; X64-AVX512BW-NEXT: korq %k0, %k1, %k0 +; X64-AVX512BW-NEXT: vpcmpneqb 64(%rsi), %zmm1, %k1 +; X64-AVX512BW-NEXT: vpcmpneqb (%rsi), %zmm0, %k2 +; X64-AVX512BW-NEXT: korq %k1, %k2, %k1 +; X64-AVX512BW-NEXT: kortestq %k0, %k1 +; X64-AVX512BW-NEXT: setne %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq +; +; X64-AVX512F-LABEL: length255_eq: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-AVX512F-NEXT: vmovdqu64 128(%rdi), %zmm2 +; X64-AVX512F-NEXT: vmovdqu64 191(%rdi), %zmm3 +; X64-AVX512F-NEXT: vpcmpneqd 191(%rsi), %zmm3, %k0 +; X64-AVX512F-NEXT: vpcmpneqd 128(%rsi), %zmm2, %k1 +; X64-AVX512F-NEXT: korw %k0, %k1, %k0 +; X64-AVX512F-NEXT: vpcmpneqd 64(%rsi), %zmm1, %k1 +; X64-AVX512F-NEXT: vpcmpneqd (%rsi), %zmm0, %k2 +; X64-AVX512F-NEXT: korw %k1, %k2, %k1 +; X64-AVX512F-NEXT: kortestw %k0, %k1 +; X64-AVX512F-NEXT: setne %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-MIC-AVX2-LABEL: length255_eq: +; X64-MIC-AVX2: # %bb.0: +; X64-MIC-AVX2-NEXT: pushq %rax +; X64-MIC-AVX2-NEXT: movl $255, %edx +; X64-MIC-AVX2-NEXT: callq memcmp +; X64-MIC-AVX2-NEXT: testl %eax, %eax +; X64-MIC-AVX2-NEXT: setne %al +; X64-MIC-AVX2-NEXT: popq %rcx +; X64-MIC-AVX2-NEXT: retq +; +; X64-MIC-AVX512F-LABEL: length255_eq: +; X64-MIC-AVX512F: # %bb.0: +; X64-MIC-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-MIC-AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-MIC-AVX512F-NEXT: vmovdqu64 128(%rdi), %zmm2 +; X64-MIC-AVX512F-NEXT: vmovdqu64 191(%rdi), %zmm3 +; X64-MIC-AVX512F-NEXT: vpcmpneqd 191(%rsi), %zmm3, %k0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd 128(%rsi), %zmm2, %k1 +; X64-MIC-AVX512F-NEXT: korw %k0, %k1, %k0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd 64(%rsi), %zmm1, %k1 +; X64-MIC-AVX512F-NEXT: vpcmpneqd (%rsi), %zmm0, %k2 +; 
X64-MIC-AVX512F-NEXT: korw %k1, %k2, %k1 +; X64-MIC-AVX512F-NEXT: kortestw %k0, %k1 +; X64-MIC-AVX512F-NEXT: setne %al +; X64-MIC-AVX512F-NEXT: vzeroupper +; X64-MIC-AVX512F-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 255) nounwind + %cmp = icmp ne i32 %call, 0 + ret i1 %cmp +} + +define i1 @length255_lt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length255_lt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $255 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: shrl $31, %eax +; X86-NEXT: # kill: def $al killed $al killed $eax +; X86-NEXT: retl +; +; X64-LABEL: length255_lt: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $255, %edx +; X64-NEXT: callq memcmp +; X64-NEXT: shrl $31, %eax +; X64-NEXT: # kill: def $al killed $al killed $eax +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 255) nounwind + %cmp = icmp slt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length255_gt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length255_gt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $255 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setg %al +; X86-NEXT: retl +; +; X64-LABEL: length255_gt: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $255, %edx +; X64-NEXT: callq memcmp +; X64-NEXT: testl %eax, %eax +; X64-NEXT: setg %al +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 255) nounwind + %cmp = icmp sgt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length255_eq_const(i8* %X) nounwind { +; X86-LABEL: length255_eq_const: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $255 +; X86-NEXT: pushl $.L.str +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: sete %al +; X86-NEXT: retl +; +; X64-SSE-LABEL: length255_eq_const: +; X64-SSE: # %bb.0: +; X64-SSE-NEXT: pushq %rax +; X64-SSE-NEXT: movl $.L.str, %esi +; X64-SSE-NEXT: movl $255, %edx +; X64-SSE-NEXT: callq memcmp +; X64-SSE-NEXT: testl %eax, %eax +; X64-SSE-NEXT: sete %al +; X64-SSE-NEXT: popq %rcx +; X64-SSE-NEXT: retq +; +; X64-AVX1-LABEL: length255_eq_const: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: pushq %rax +; X64-AVX1-NEXT: movl $.L.str, %esi +; X64-AVX1-NEXT: movl $255, %edx +; X64-AVX1-NEXT: callq memcmp +; X64-AVX1-NEXT: testl %eax, %eax +; X64-AVX1-NEXT: sete %al +; X64-AVX1-NEXT: popq %rcx +; X64-AVX1-NEXT: retq +; +; X64-AVX2-LABEL: length255_eq_const: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: pushq %rax +; X64-AVX2-NEXT: movl $.L.str, %esi +; X64-AVX2-NEXT: movl $255, %edx +; X64-AVX2-NEXT: callq memcmp +; X64-AVX2-NEXT: testl %eax, %eax +; X64-AVX2-NEXT: sete %al +; X64-AVX2-NEXT: popq %rcx +; X64-AVX2-NEXT: retq +; +; X64-AVX512BW-LABEL: length255_eq_const: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512BW-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-AVX512BW-NEXT: vmovdqu64 128(%rdi), %zmm2 +; X64-AVX512BW-NEXT: vmovdqu64 191(%rdi), %zmm3 +; X64-AVX512BW-NEXT: vpcmpneqb .L.str+{{.*}}(%rip), %zmm3, %k0 +; X64-AVX512BW-NEXT: vpcmpneqb .L.str+{{.*}}(%rip), %zmm2, %k1 +; X64-AVX512BW-NEXT: korq %k0, %k1, %k0 +; X64-AVX512BW-NEXT: vpcmpneqb .L.str+{{.*}}(%rip), %zmm1, %k1 +; X64-AVX512BW-NEXT: vpcmpneqb {{.*}}(%rip), %zmm0, %k2 +; X64-AVX512BW-NEXT: korq %k1, %k2, %k1 +; 
X64-AVX512BW-NEXT: kortestq %k0, %k1 +; X64-AVX512BW-NEXT: sete %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq +; +; X64-AVX512F-LABEL: length255_eq_const: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-AVX512F-NEXT: vmovdqu64 128(%rdi), %zmm2 +; X64-AVX512F-NEXT: vmovdqu64 191(%rdi), %zmm3 +; X64-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm3, %k0 +; X64-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm2, %k1 +; X64-AVX512F-NEXT: korw %k0, %k1, %k0 +; X64-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm1, %k1 +; X64-AVX512F-NEXT: vpcmpneqd {{.*}}(%rip), %zmm0, %k2 +; X64-AVX512F-NEXT: korw %k1, %k2, %k1 +; X64-AVX512F-NEXT: kortestw %k0, %k1 +; X64-AVX512F-NEXT: sete %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-MIC-AVX2-LABEL: length255_eq_const: +; X64-MIC-AVX2: # %bb.0: +; X64-MIC-AVX2-NEXT: pushq %rax +; X64-MIC-AVX2-NEXT: movl $.L.str, %esi +; X64-MIC-AVX2-NEXT: movl $255, %edx +; X64-MIC-AVX2-NEXT: callq memcmp +; X64-MIC-AVX2-NEXT: testl %eax, %eax +; X64-MIC-AVX2-NEXT: sete %al +; X64-MIC-AVX2-NEXT: popq %rcx +; X64-MIC-AVX2-NEXT: retq +; +; X64-MIC-AVX512F-LABEL: length255_eq_const: +; X64-MIC-AVX512F: # %bb.0: +; X64-MIC-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-MIC-AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-MIC-AVX512F-NEXT: vmovdqu64 128(%rdi), %zmm2 +; X64-MIC-AVX512F-NEXT: vmovdqu64 191(%rdi), %zmm3 +; X64-MIC-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm3, %k0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm2, %k1 +; X64-MIC-AVX512F-NEXT: korw %k0, %k1, %k0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm1, %k1 +; X64-MIC-AVX512F-NEXT: vpcmpneqd {{.*}}(%rip), %zmm0, %k2 +; X64-MIC-AVX512F-NEXT: korw %k1, %k2, %k1 +; X64-MIC-AVX512F-NEXT: kortestw %k0, %k1 +; X64-MIC-AVX512F-NEXT: sete %al +; X64-MIC-AVX512F-NEXT: vzeroupper +; X64-MIC-AVX512F-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 255) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i32 @length256(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length256: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $256 # imm = 0x100 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: retl +; +; X64-LABEL: length256: +; X64: # %bb.0: +; X64-NEXT: movl $256, %edx # imm = 0x100 +; X64-NEXT: jmp memcmp # TAILCALL + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 256) nounwind + ret i32 %m +} + +define i1 @length256_eq(i8* %x, i8* %y) nounwind { +; X86-LABEL: length256_eq: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $256 # imm = 0x100 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setne %al +; X86-NEXT: retl +; +; X64-SSE-LABEL: length256_eq: +; X64-SSE: # %bb.0: +; X64-SSE-NEXT: pushq %rax +; X64-SSE-NEXT: movl $256, %edx # imm = 0x100 +; X64-SSE-NEXT: callq memcmp +; X64-SSE-NEXT: testl %eax, %eax +; X64-SSE-NEXT: setne %al +; X64-SSE-NEXT: popq %rcx +; X64-SSE-NEXT: retq +; +; X64-AVX1-LABEL: length256_eq: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: pushq %rax +; X64-AVX1-NEXT: movl $256, %edx # imm = 0x100 +; X64-AVX1-NEXT: callq memcmp +; X64-AVX1-NEXT: testl %eax, %eax +; X64-AVX1-NEXT: setne %al +; X64-AVX1-NEXT: popq %rcx +; X64-AVX1-NEXT: retq +; +; 
X64-AVX2-LABEL: length256_eq: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: pushq %rax +; X64-AVX2-NEXT: movl $256, %edx # imm = 0x100 +; X64-AVX2-NEXT: callq memcmp +; X64-AVX2-NEXT: testl %eax, %eax +; X64-AVX2-NEXT: setne %al +; X64-AVX2-NEXT: popq %rcx +; X64-AVX2-NEXT: retq +; +; X64-AVX512BW-LABEL: length256_eq: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512BW-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-AVX512BW-NEXT: vmovdqu64 128(%rdi), %zmm2 +; X64-AVX512BW-NEXT: vmovdqu64 192(%rdi), %zmm3 +; X64-AVX512BW-NEXT: vpcmpneqb 192(%rsi), %zmm3, %k0 +; X64-AVX512BW-NEXT: vpcmpneqb 128(%rsi), %zmm2, %k1 +; X64-AVX512BW-NEXT: korq %k0, %k1, %k0 +; X64-AVX512BW-NEXT: vpcmpneqb 64(%rsi), %zmm1, %k1 +; X64-AVX512BW-NEXT: vpcmpneqb (%rsi), %zmm0, %k2 +; X64-AVX512BW-NEXT: korq %k1, %k2, %k1 +; X64-AVX512BW-NEXT: kortestq %k0, %k1 +; X64-AVX512BW-NEXT: setne %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq +; +; X64-AVX512F-LABEL: length256_eq: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-AVX512F-NEXT: vmovdqu64 128(%rdi), %zmm2 +; X64-AVX512F-NEXT: vmovdqu64 192(%rdi), %zmm3 +; X64-AVX512F-NEXT: vpcmpneqd 192(%rsi), %zmm3, %k0 +; X64-AVX512F-NEXT: vpcmpneqd 128(%rsi), %zmm2, %k1 +; X64-AVX512F-NEXT: korw %k0, %k1, %k0 +; X64-AVX512F-NEXT: vpcmpneqd 64(%rsi), %zmm1, %k1 +; X64-AVX512F-NEXT: vpcmpneqd (%rsi), %zmm0, %k2 +; X64-AVX512F-NEXT: korw %k1, %k2, %k1 +; X64-AVX512F-NEXT: kortestw %k0, %k1 +; X64-AVX512F-NEXT: setne %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-MIC-AVX2-LABEL: length256_eq: +; X64-MIC-AVX2: # %bb.0: +; X64-MIC-AVX2-NEXT: pushq %rax +; X64-MIC-AVX2-NEXT: movl $256, %edx # imm = 0x100 +; X64-MIC-AVX2-NEXT: callq memcmp +; X64-MIC-AVX2-NEXT: testl %eax, %eax +; X64-MIC-AVX2-NEXT: setne %al +; X64-MIC-AVX2-NEXT: popq %rcx +; X64-MIC-AVX2-NEXT: retq +; +; X64-MIC-AVX512F-LABEL: length256_eq: +; X64-MIC-AVX512F: # %bb.0: +; X64-MIC-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-MIC-AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-MIC-AVX512F-NEXT: vmovdqu64 128(%rdi), %zmm2 +; X64-MIC-AVX512F-NEXT: vmovdqu64 192(%rdi), %zmm3 +; X64-MIC-AVX512F-NEXT: vpcmpneqd 192(%rsi), %zmm3, %k0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd 128(%rsi), %zmm2, %k1 +; X64-MIC-AVX512F-NEXT: korw %k0, %k1, %k0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd 64(%rsi), %zmm1, %k1 +; X64-MIC-AVX512F-NEXT: vpcmpneqd (%rsi), %zmm0, %k2 +; X64-MIC-AVX512F-NEXT: korw %k1, %k2, %k1 +; X64-MIC-AVX512F-NEXT: kortestw %k0, %k1 +; X64-MIC-AVX512F-NEXT: setne %al +; X64-MIC-AVX512F-NEXT: vzeroupper +; X64-MIC-AVX512F-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 256) nounwind + %cmp = icmp ne i32 %call, 0 + ret i1 %cmp +} + +define i1 @length256_lt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length256_lt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $256 # imm = 0x100 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: shrl $31, %eax +; X86-NEXT: # kill: def $al killed $al killed $eax +; X86-NEXT: retl +; +; X64-LABEL: length256_lt: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $256, %edx # imm = 0x100 +; X64-NEXT: callq memcmp +; X64-NEXT: shrl $31, %eax +; X64-NEXT: # kill: def $al killed $al killed $eax +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 256) nounwind + %cmp = icmp slt i32 %call, 0 + ret i1 %cmp +} + 
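For reference, the X64-AVX512 variants of length256_eq above are the backend lowering of what ExpandMemCmp leaves behind at the IR level: with -max-loads-per-memcmp=4 and 64-byte loads, memcmp(x, y, 256) == 0 becomes four load/xor pairs whose results are or-combined into a single zero test. The following is only a sketch of that shape (value names are illustrative, not taken from the pass output, and the surrounding zext/icmp folding is omitted):

define i1 @memcmp256_eq_sketch(i8* %x, i8* %y) {
  %px0 = bitcast i8* %x to i512*
  %py0 = bitcast i8* %y to i512*
  %a0 = load i512, i512* %px0
  %b0 = load i512, i512* %py0
  %d0 = xor i512 %a0, %b0                 ; nonzero iff bytes 0..63 differ
  %gx1 = getelementptr i8, i8* %x, i64 64
  %gy1 = getelementptr i8, i8* %y, i64 64
  %px1 = bitcast i8* %gx1 to i512*
  %py1 = bitcast i8* %gy1 to i512*
  %a1 = load i512, i512* %px1
  %b1 = load i512, i512* %py1
  %d1 = xor i512 %a1, %b1                 ; bytes 64..127
  %gx2 = getelementptr i8, i8* %x, i64 128
  %gy2 = getelementptr i8, i8* %y, i64 128
  %px2 = bitcast i8* %gx2 to i512*
  %py2 = bitcast i8* %gy2 to i512*
  %a2 = load i512, i512* %px2
  %b2 = load i512, i512* %py2
  %d2 = xor i512 %a2, %b2                 ; bytes 128..191
  %gx3 = getelementptr i8, i8* %x, i64 192
  %gy3 = getelementptr i8, i8* %y, i64 192
  %px3 = bitcast i8* %gx3 to i512*
  %py3 = bitcast i8* %gy3 to i512*
  %a3 = load i512, i512* %px3
  %b3 = load i512, i512* %py3
  %d3 = xor i512 %a3, %b3                 ; bytes 192..255
  %o01 = or i512 %d0, %d1                 ; pairwise or of the xor results
  %o23 = or i512 %d2, %d3
  %o = or i512 %o01, %o23                 ; zero iff all 256 bytes are equal
  %ne = icmp ne i512 %o, 0
  ret i1 %ne
}

The CHECK lines above verify that the backend folds this whole or-of-xors tree into four vpcmpneq compares, two mask-register kor ops, and a single kortest, instead of falling back to the libcall.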
+define i1 @length256_gt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length256_gt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $256 # imm = 0x100 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setg %al +; X86-NEXT: retl +; +; X64-LABEL: length256_gt: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $256, %edx # imm = 0x100 +; X64-NEXT: callq memcmp +; X64-NEXT: testl %eax, %eax +; X64-NEXT: setg %al +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 256) nounwind + %cmp = icmp sgt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length256_eq_const(i8* %X) nounwind { +; X86-LABEL: length256_eq_const: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $256 # imm = 0x100 +; X86-NEXT: pushl $.L.str +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: sete %al +; X86-NEXT: retl +; +; X64-SSE-LABEL: length256_eq_const: +; X64-SSE: # %bb.0: +; X64-SSE-NEXT: pushq %rax +; X64-SSE-NEXT: movl $.L.str, %esi +; X64-SSE-NEXT: movl $256, %edx # imm = 0x100 +; X64-SSE-NEXT: callq memcmp +; X64-SSE-NEXT: testl %eax, %eax +; X64-SSE-NEXT: sete %al +; X64-SSE-NEXT: popq %rcx +; X64-SSE-NEXT: retq +; +; X64-AVX1-LABEL: length256_eq_const: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: pushq %rax +; X64-AVX1-NEXT: movl $.L.str, %esi +; X64-AVX1-NEXT: movl $256, %edx # imm = 0x100 +; X64-AVX1-NEXT: callq memcmp +; X64-AVX1-NEXT: testl %eax, %eax +; X64-AVX1-NEXT: sete %al +; X64-AVX1-NEXT: popq %rcx +; X64-AVX1-NEXT: retq +; +; X64-AVX2-LABEL: length256_eq_const: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: pushq %rax +; X64-AVX2-NEXT: movl $.L.str, %esi +; X64-AVX2-NEXT: movl $256, %edx # imm = 0x100 +; X64-AVX2-NEXT: callq memcmp +; X64-AVX2-NEXT: testl %eax, %eax +; X64-AVX2-NEXT: sete %al +; X64-AVX2-NEXT: popq %rcx +; X64-AVX2-NEXT: retq +; +; X64-AVX512BW-LABEL: length256_eq_const: +; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512BW-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-AVX512BW-NEXT: vmovdqu64 128(%rdi), %zmm2 +; X64-AVX512BW-NEXT: vmovdqu64 192(%rdi), %zmm3 +; X64-AVX512BW-NEXT: vpcmpneqb .L.str+{{.*}}(%rip), %zmm3, %k0 +; X64-AVX512BW-NEXT: vpcmpneqb .L.str+{{.*}}(%rip), %zmm2, %k1 +; X64-AVX512BW-NEXT: korq %k0, %k1, %k0 +; X64-AVX512BW-NEXT: vpcmpneqb .L.str+{{.*}}(%rip), %zmm1, %k1 +; X64-AVX512BW-NEXT: vpcmpneqb {{.*}}(%rip), %zmm0, %k2 +; X64-AVX512BW-NEXT: korq %k1, %k2, %k1 +; X64-AVX512BW-NEXT: kortestq %k0, %k1 +; X64-AVX512BW-NEXT: sete %al +; X64-AVX512BW-NEXT: vzeroupper +; X64-AVX512BW-NEXT: retq +; +; X64-AVX512F-LABEL: length256_eq_const: +; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-AVX512F-NEXT: vmovdqu64 128(%rdi), %zmm2 +; X64-AVX512F-NEXT: vmovdqu64 192(%rdi), %zmm3 +; X64-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm3, %k0 +; X64-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm2, %k1 +; X64-AVX512F-NEXT: korw %k0, %k1, %k0 +; X64-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm1, %k1 +; X64-AVX512F-NEXT: vpcmpneqd {{.*}}(%rip), %zmm0, %k2 +; X64-AVX512F-NEXT: korw %k1, %k2, %k1 +; X64-AVX512F-NEXT: kortestw %k0, %k1 +; X64-AVX512F-NEXT: sete %al +; X64-AVX512F-NEXT: vzeroupper +; X64-AVX512F-NEXT: retq +; +; X64-MIC-AVX2-LABEL: length256_eq_const: +; X64-MIC-AVX2: # %bb.0: +; X64-MIC-AVX2-NEXT: pushq %rax +; 
X64-MIC-AVX2-NEXT: movl $.L.str, %esi +; X64-MIC-AVX2-NEXT: movl $256, %edx # imm = 0x100 +; X64-MIC-AVX2-NEXT: callq memcmp +; X64-MIC-AVX2-NEXT: testl %eax, %eax +; X64-MIC-AVX2-NEXT: sete %al +; X64-MIC-AVX2-NEXT: popq %rcx +; X64-MIC-AVX2-NEXT: retq +; +; X64-MIC-AVX512F-LABEL: length256_eq_const: +; X64-MIC-AVX512F: # %bb.0: +; X64-MIC-AVX512F-NEXT: vmovdqu64 (%rdi), %zmm0 +; X64-MIC-AVX512F-NEXT: vmovdqu64 64(%rdi), %zmm1 +; X64-MIC-AVX512F-NEXT: vmovdqu64 128(%rdi), %zmm2 +; X64-MIC-AVX512F-NEXT: vmovdqu64 192(%rdi), %zmm3 +; X64-MIC-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm3, %k0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm2, %k1 +; X64-MIC-AVX512F-NEXT: korw %k0, %k1, %k0 +; X64-MIC-AVX512F-NEXT: vpcmpneqd .L.str+{{.*}}(%rip), %zmm1, %k1 +; X64-MIC-AVX512F-NEXT: vpcmpneqd {{.*}}(%rip), %zmm0, %k2 +; X64-MIC-AVX512F-NEXT: korw %k1, %k2, %k1 +; X64-MIC-AVX512F-NEXT: kortestw %k0, %k1 +; X64-MIC-AVX512F-NEXT: sete %al +; X64-MIC-AVX512F-NEXT: vzeroupper +; X64-MIC-AVX512F-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 256) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i32 @length384(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length384: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $384 # imm = 0x180 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: retl +; +; X64-LABEL: length384: +; X64: # %bb.0: +; X64-NEXT: movl $384, %edx # imm = 0x180 +; X64-NEXT: jmp memcmp # TAILCALL + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 384) nounwind + ret i32 %m +} + +define i1 @length384_eq(i8* %x, i8* %y) nounwind { +; X86-LABEL: length384_eq: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $384 # imm = 0x180 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setne %al +; X86-NEXT: retl +; +; X64-LABEL: length384_eq: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $384, %edx # imm = 0x180 +; X64-NEXT: callq memcmp +; X64-NEXT: testl %eax, %eax +; X64-NEXT: setne %al +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 384) nounwind + %cmp = icmp ne i32 %call, 0 + ret i1 %cmp +} + +define i1 @length384_lt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length384_lt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $384 # imm = 0x180 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: shrl $31, %eax +; X86-NEXT: # kill: def $al killed $al killed $eax +; X86-NEXT: retl +; +; X64-LABEL: length384_lt: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $384, %edx # imm = 0x180 +; X64-NEXT: callq memcmp +; X64-NEXT: shrl $31, %eax +; X64-NEXT: # kill: def $al killed $al killed $eax +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 384) nounwind + %cmp = icmp slt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length384_gt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length384_gt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $384 # imm = 0x180 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setg %al +; X86-NEXT: retl +; +; X64-LABEL: length384_gt: +; X64: 
# %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $384, %edx # imm = 0x180 +; X64-NEXT: callq memcmp +; X64-NEXT: testl %eax, %eax +; X64-NEXT: setg %al +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 384) nounwind + %cmp = icmp sgt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length384_eq_const(i8* %X) nounwind { +; X86-LABEL: length384_eq_const: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $384 # imm = 0x180 +; X86-NEXT: pushl $.L.str +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: sete %al +; X86-NEXT: retl +; +; X64-LABEL: length384_eq_const: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $.L.str, %esi +; X64-NEXT: movl $384, %edx # imm = 0x180 +; X64-NEXT: callq memcmp +; X64-NEXT: testl %eax, %eax +; X64-NEXT: sete %al +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 384) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i32 @length511(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length511: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $511 # imm = 0x1FF +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: retl +; +; X64-LABEL: length511: +; X64: # %bb.0: +; X64-NEXT: movl $511, %edx # imm = 0x1FF +; X64-NEXT: jmp memcmp # TAILCALL + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 511) nounwind + ret i32 %m +} + +define i1 @length511_eq(i8* %x, i8* %y) nounwind { +; X86-LABEL: length511_eq: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $511 # imm = 0x1FF +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setne %al +; X86-NEXT: retl +; +; X64-LABEL: length511_eq: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $511, %edx # imm = 0x1FF +; X64-NEXT: callq memcmp +; X64-NEXT: testl %eax, %eax +; X64-NEXT: setne %al +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 511) nounwind + %cmp = icmp ne i32 %call, 0 + ret i1 %cmp +} + +define i1 @length511_lt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length511_lt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $511 # imm = 0x1FF +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: shrl $31, %eax +; X86-NEXT: # kill: def $al killed $al killed $eax +; X86-NEXT: retl +; +; X64-LABEL: length511_lt: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $511, %edx # imm = 0x1FF +; X64-NEXT: callq memcmp +; X64-NEXT: shrl $31, %eax +; X64-NEXT: # kill: def $al killed $al killed $eax +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 511) nounwind + %cmp = icmp slt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length511_gt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length511_gt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $511 # imm = 0x1FF +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setg %al +; X86-NEXT: retl +; +; X64-LABEL: length511_gt: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $511, %edx # imm = 0x1FF +; X64-NEXT: callq 
memcmp +; X64-NEXT: testl %eax, %eax +; X64-NEXT: setg %al +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 511) nounwind + %cmp = icmp sgt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length511_eq_const(i8* %X) nounwind { +; X86-LABEL: length511_eq_const: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $511 # imm = 0x1FF +; X86-NEXT: pushl $.L.str +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: sete %al +; X86-NEXT: retl +; +; X64-LABEL: length511_eq_const: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $.L.str, %esi +; X64-NEXT: movl $511, %edx # imm = 0x1FF +; X64-NEXT: callq memcmp +; X64-NEXT: testl %eax, %eax +; X64-NEXT: sete %al +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 511) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +define i32 @length512(i8* %X, i8* %Y) nounwind { +; X86-LABEL: length512: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $512 # imm = 0x200 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: retl +; +; X64-LABEL: length512: +; X64: # %bb.0: +; X64-NEXT: movl $512, %edx # imm = 0x200 +; X64-NEXT: jmp memcmp # TAILCALL + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 512) nounwind + ret i32 %m +} + +define i1 @length512_eq(i8* %x, i8* %y) nounwind { +; X86-LABEL: length512_eq: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $512 # imm = 0x200 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setne %al +; X86-NEXT: retl +; +; X64-LABEL: length512_eq: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $512, %edx # imm = 0x200 +; X64-NEXT: callq memcmp +; X64-NEXT: testl %eax, %eax +; X64-NEXT: setne %al +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 512) nounwind + %cmp = icmp ne i32 %call, 0 + ret i1 %cmp +} + +define i1 @length512_lt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length512_lt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $512 # imm = 0x200 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: shrl $31, %eax +; X86-NEXT: # kill: def $al killed $al killed $eax +; X86-NEXT: retl +; +; X64-LABEL: length512_lt: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $512, %edx # imm = 0x200 +; X64-NEXT: callq memcmp +; X64-NEXT: shrl $31, %eax +; X64-NEXT: # kill: def $al killed $al killed $eax +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 512) nounwind + %cmp = icmp slt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length512_gt(i8* %x, i8* %y) nounwind { +; X86-LABEL: length512_gt: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $512 # imm = 0x200 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: setg %al +; X86-NEXT: retl +; +; X64-LABEL: length512_gt: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $512, %edx # imm = 0x200 +; X64-NEXT: callq memcmp +; X64-NEXT: testl %eax, %eax +; X64-NEXT: setg %al +; X64-NEXT: popq %rcx +; X64-NEXT: 
retq + %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 512) nounwind + %cmp = icmp sgt i32 %call, 0 + ret i1 %cmp +} + +define i1 @length512_eq_const(i8* %X) nounwind { +; X86-LABEL: length512_eq_const: +; X86: # %bb.0: +; X86-NEXT: pushl $0 +; X86-NEXT: pushl $512 # imm = 0x200 +; X86-NEXT: pushl $.L.str +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: sete %al +; X86-NEXT: retl +; +; X64-LABEL: length512_eq_const: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movl $.L.str, %esi +; X64-NEXT: movl $512, %edx # imm = 0x200 +; X64-NEXT: callq memcmp +; X64-NEXT: testl %eax, %eax +; X64-NEXT: sete %al +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([513 x i8], [513 x i8]* @.str, i32 0, i32 0), i64 512) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +; This checks that we do not do stupid things with huge sizes. +define i32 @huge_length(i8* %X, i8* %Y) nounwind { +; X86-LABEL: huge_length: +; X86: # %bb.0: +; X86-NEXT: pushl $2147483647 # imm = 0x7FFFFFFF +; X86-NEXT: pushl $-1 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: retl +; +; X64-LABEL: huge_length: +; X64: # %bb.0: +; X64-NEXT: movabsq $9223372036854775807, %rdx # imm = 0x7FFFFFFFFFFFFFFF +; X64-NEXT: jmp memcmp # TAILCALL + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 9223372036854775807) nounwind + ret i32 %m +} + +define i1 @huge_length_eq(i8* %X, i8* %Y) nounwind { +; X86-LABEL: huge_length_eq: +; X86: # %bb.0: +; X86-NEXT: pushl $2147483647 # imm = 0x7FFFFFFF +; X86-NEXT: pushl $-1 +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: sete %al +; X86-NEXT: retl +; +; X64-LABEL: huge_length_eq: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: movabsq $9223372036854775807, %rdx # imm = 0x7FFFFFFFFFFFFFFF +; X64-NEXT: callq memcmp +; X64-NEXT: testl %eax, %eax +; X64-NEXT: sete %al +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 9223372036854775807) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} + +; This checks non-constant sizes. 
+define i32 @nonconst_length(i8* %X, i8* %Y, i64 %size) nounwind { +; X86-LABEL: nonconst_length: +; X86: # %bb.0: +; X86-NEXT: jmp memcmp # TAILCALL +; +; X64-LABEL: nonconst_length: +; X64: # %bb.0: +; X64-NEXT: jmp memcmp # TAILCALL + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 %size) nounwind + ret i32 %m +} + +define i1 @nonconst_length_eq(i8* %X, i8* %Y, i64 %size) nounwind { +; X86-LABEL: nonconst_length_eq: +; X86: # %bb.0: +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: pushl {{[0-9]+}}(%esp) +; X86-NEXT: calll memcmp +; X86-NEXT: addl $16, %esp +; X86-NEXT: testl %eax, %eax +; X86-NEXT: sete %al +; X86-NEXT: retl +; +; X64-LABEL: nonconst_length_eq: +; X64: # %bb.0: +; X64-NEXT: pushq %rax +; X64-NEXT: callq memcmp +; X64-NEXT: testl %eax, %eax +; X64-NEXT: sete %al +; X64-NEXT: popq %rcx +; X64-NEXT: retq + %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 %size) nounwind + %c = icmp eq i32 %m, 0 + ret i1 %c +} diff --git a/llvm/test/Transforms/ExpandMemCmp/X86/memcmp.ll b/test/Transforms/ExpandMemCmp/X86/memcmp.ll --- a/llvm/test/Transforms/ExpandMemCmp/X86/memcmp.ll +++ b/test/Transforms/ExpandMemCmp/X86/memcmp.ll @@ -41,8 +41,8 @@ ; ALL-NEXT: [[TMP9:%.*]] = icmp eq i16 [[TMP7]], [[TMP8]] ; ALL-NEXT: br i1 [[TMP9]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]] ; ALL: loadbb1: -; ALL-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i8 2 -; ALL-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i8 2 +; ALL-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 2 +; ALL-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 2 ; ALL-NEXT: [[TMP12:%.*]] = load i8, i8* [[TMP10]] ; ALL-NEXT: [[TMP13:%.*]] = load i8, i8* [[TMP11]] ; ALL-NEXT: [[TMP14:%.*]] = zext i8 [[TMP12]] to i32 @@ -95,8 +95,8 @@ ; ALL-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP7]], [[TMP8]] ; ALL-NEXT: br i1 [[TMP9]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]] ; ALL: loadbb1: -; ALL-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i8 4 -; ALL-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i8 4 +; ALL-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 4 +; ALL-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 4 ; ALL-NEXT: [[TMP12:%.*]] = load i8, i8* [[TMP10]] ; ALL-NEXT: [[TMP13:%.*]] = load i8, i8* [[TMP11]] ; ALL-NEXT: [[TMP14:%.*]] = zext i8 [[TMP12]] to i32 @@ -130,9 +130,9 @@ ; ALL-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP7]], [[TMP8]] ; ALL-NEXT: br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]] ; ALL: loadbb1: -; ALL-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i8 4 +; ALL-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 4 ; ALL-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i16* -; ALL-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[Y]], i8 4 +; ALL-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[Y]], i64 4 ; ALL-NEXT: [[TMP13:%.*]] = bitcast i8* [[TMP12]] to i16* ; ALL-NEXT: [[TMP14:%.*]] = load i16, i16* [[TMP11]] ; ALL-NEXT: [[TMP15:%.*]] = load i16, i16* [[TMP13]] @@ -178,9 +178,9 @@ ; X32-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP7]], [[TMP8]] ; X32-NEXT: br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]] ; X32: loadbb1: -; X32-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i8 4 +; X32-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 4 ; X32-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* -; X32-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[Y]], i8 4 +; X32-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[Y]], i64 4 ; X32-NEXT: [[TMP13:%.*]] = bitcast i8* [[TMP12]] to i32* ; 
X32-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP11]] ; X32-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP13]] @@ -233,8 +233,8 @@ ; X64-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP7]], [[TMP8]] ; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1:%.*]], label [[RES_BLOCK:%.*]] ; X64: loadbb1: -; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i8 8 -; X64-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i8 8 +; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 8 +; X64-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[Y]], i64 8 ; X64-NEXT: [[TMP12:%.*]] = load i8, i8* [[TMP10]] ; X64-NEXT: [[TMP13:%.*]] = load i8, i8* [[TMP11]] ; X64-NEXT: [[TMP14:%.*]] = zext i8 [[TMP12]] to i32 @@ -272,9 +272,9 @@ ; X64-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP7]], [[TMP8]] ; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]] ; X64: loadbb1: -; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i8 8 +; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 8 ; X64-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i16* -; X64-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[Y]], i8 8 +; X64-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[Y]], i64 8 ; X64-NEXT: [[TMP13:%.*]] = bitcast i8* [[TMP12]] to i16* ; X64-NEXT: [[TMP14:%.*]] = load i16, i16* [[TMP11]] ; X64-NEXT: [[TMP15:%.*]] = load i16, i16* [[TMP13]] @@ -324,9 +324,9 @@ ; X64-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP7]], [[TMP8]] ; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]] ; X64: loadbb1: -; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i8 8 +; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 8 ; X64-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* -; X64-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[Y]], i8 8 +; X64-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[Y]], i64 8 ; X64-NEXT: [[TMP13:%.*]] = bitcast i8* [[TMP12]] to i32* ; X64-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP11]] ; X64-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP13]] @@ -394,9 +394,9 @@ ; X64-NEXT: [[TMP9:%.*]] = icmp eq i64 [[TMP7]], [[TMP8]] ; X64-NEXT: br i1 [[TMP9]], label [[LOADBB1]], label [[RES_BLOCK:%.*]] ; X64: loadbb1: -; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i8 8 +; X64-NEXT: [[TMP10:%.*]] = getelementptr i8, i8* [[X]], i64 8 ; X64-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i64* -; X64-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[Y]], i8 8 +; X64-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[Y]], i64 8 ; X64-NEXT: [[TMP13:%.*]] = bitcast i8* [[TMP12]] to i64* ; X64-NEXT: [[TMP14:%.*]] = load i64, i64* [[TMP11]] ; X64-NEXT: [[TMP15:%.*]] = load i64, i64* [[TMP13]] @@ -437,8 +437,8 @@ ; X32-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]] ; X32-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]] ; X32-NEXT: [[TMP5:%.*]] = xor i16 [[TMP3]], [[TMP4]] -; X32-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 2 -; X32-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i8 2 +; X32-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 2 +; X32-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 2 ; X32-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]] ; X32-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]] ; X32-NEXT: [[TMP10:%.*]] = zext i8 [[TMP8]] to i16 @@ -463,8 +463,8 @@ ; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i16 [[TMP3]], [[TMP4]] ; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]] ; X64_1LD: loadbb1: -; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 2 -; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i8 2 +; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], 
i64 2 +; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 2 ; X64_1LD-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]] ; X64_1LD-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]] ; X64_1LD-NEXT: [[TMP10:%.*]] = icmp ne i8 [[TMP8]], [[TMP9]] @@ -481,8 +481,8 @@ ; X64_2LD-NEXT: [[TMP3:%.*]] = load i16, i16* [[TMP1]] ; X64_2LD-NEXT: [[TMP4:%.*]] = load i16, i16* [[TMP2]] ; X64_2LD-NEXT: [[TMP5:%.*]] = xor i16 [[TMP3]], [[TMP4]] -; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 2 -; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i8 2 +; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 2 +; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 2 ; X64_2LD-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]] ; X64_2LD-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]] ; X64_2LD-NEXT: [[TMP10:%.*]] = zext i8 [[TMP8]] to i16 @@ -526,8 +526,8 @@ ; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]] ; X32-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]] ; X32-NEXT: [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]] -; X32-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4 -; X32-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i8 4 +; X32-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 4 +; X32-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 4 ; X32-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]] ; X32-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]] ; X32-NEXT: [[TMP10:%.*]] = zext i8 [[TMP8]] to i32 @@ -552,8 +552,8 @@ ; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP3]], [[TMP4]] ; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]] ; X64_1LD: loadbb1: -; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4 -; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i8 4 +; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 4 +; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 4 ; X64_1LD-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]] ; X64_1LD-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]] ; X64_1LD-NEXT: [[TMP10:%.*]] = icmp ne i8 [[TMP8]], [[TMP9]] @@ -570,8 +570,8 @@ ; X64_2LD-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]] ; X64_2LD-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]] ; X64_2LD-NEXT: [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]] -; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4 -; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i8 4 +; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 4 +; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 4 ; X64_2LD-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]] ; X64_2LD-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]] ; X64_2LD-NEXT: [[TMP10:%.*]] = zext i8 [[TMP8]] to i32 @@ -597,9 +597,9 @@ ; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]] ; X32-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]] ; X32-NEXT: [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]] -; X32-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4 +; X32-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 4 ; X32-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i16* -; X32-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 4 +; X32-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 4 ; X32-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i16* ; X32-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP7]] ; X32-NEXT: [[TMP11:%.*]] = load i16, i16* [[TMP9]] @@ -625,9 +625,9 @@ ; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP3]], [[TMP4]] ; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]] ; X64_1LD: loadbb1: -; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4 
+; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 4 ; X64_1LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i16* -; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 4 +; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 4 ; X64_1LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i16* ; X64_1LD-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP7]] ; X64_1LD-NEXT: [[TMP11:%.*]] = load i16, i16* [[TMP9]] @@ -645,9 +645,9 @@ ; X64_2LD-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]] ; X64_2LD-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]] ; X64_2LD-NEXT: [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]] -; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4 +; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 4 ; X64_2LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i16* -; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 4 +; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 4 ; X64_2LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i16* ; X64_2LD-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP7]] ; X64_2LD-NEXT: [[TMP11:%.*]] = load i16, i16* [[TMP9]] @@ -674,9 +674,9 @@ ; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]] ; X32-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]] ; X32-NEXT: [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]] -; X32-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 3 +; X32-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 3 ; X32-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32* -; X32-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 3 +; X32-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 3 ; X32-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32* ; X32-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP7]] ; X32-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP9]] @@ -700,9 +700,9 @@ ; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP3]], [[TMP4]] ; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]] ; X64_1LD: loadbb1: -; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 3 +; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 3 ; X64_1LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32* -; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 3 +; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 3 ; X64_1LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32* ; X64_1LD-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP7]] ; X64_1LD-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP9]] @@ -720,9 +720,9 @@ ; X64_2LD-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]] ; X64_2LD-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]] ; X64_2LD-NEXT: [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]] -; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 3 +; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 3 ; X64_2LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32* -; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 3 +; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 3 ; X64_2LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32* ; X64_2LD-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP7]] ; X64_2LD-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP9]] @@ -747,9 +747,9 @@ ; X32-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP1]] ; X32-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP2]] ; X32-NEXT: [[TMP5:%.*]] = xor i32 [[TMP3]], [[TMP4]] -; X32-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 4 +; X32-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 4 ; X32-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32* -; X32-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 4 +; 
X32-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 4 ; X32-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32* ; X32-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP7]] ; X32-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP9]] @@ -797,8 +797,8 @@ ; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]] ; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]] ; X64_1LD: loadbb1: -; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 8 -; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i8 8 +; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 8 +; X64_1LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 8 ; X64_1LD-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]] ; X64_1LD-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]] ; X64_1LD-NEXT: [[TMP10:%.*]] = icmp ne i8 [[TMP8]], [[TMP9]] @@ -815,8 +815,8 @@ ; X64_2LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]] ; X64_2LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]] ; X64_2LD-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]] -; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 8 -; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i8 8 +; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 8 +; X64_2LD-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[Y]], i64 8 ; X64_2LD-NEXT: [[TMP8:%.*]] = load i8, i8* [[TMP6]] ; X64_2LD-NEXT: [[TMP9:%.*]] = load i8, i8* [[TMP7]] ; X64_2LD-NEXT: [[TMP10:%.*]] = zext i8 [[TMP8]] to i64 @@ -854,9 +854,9 @@ ; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]] ; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]] ; X64_1LD: loadbb1: -; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 8 +; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 8 ; X64_1LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i16* -; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 8 +; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 8 ; X64_1LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i16* ; X64_1LD-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP7]] ; X64_1LD-NEXT: [[TMP11:%.*]] = load i16, i16* [[TMP9]] @@ -874,9 +874,9 @@ ; X64_2LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]] ; X64_2LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]] ; X64_2LD-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]] -; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 8 +; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 8 ; X64_2LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i16* -; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 8 +; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 8 ; X64_2LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i16* ; X64_2LD-NEXT: [[TMP10:%.*]] = load i16, i16* [[TMP7]] ; X64_2LD-NEXT: [[TMP11:%.*]] = load i16, i16* [[TMP9]] @@ -915,9 +915,9 @@ ; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]] ; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]] ; X64_1LD: loadbb1: -; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 3 +; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 3 ; X64_1LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i64* -; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 3 +; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 3 ; X64_1LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i64* ; X64_1LD-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP7]] ; X64_1LD-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP9]] @@ -935,9 +935,9 @@ ; X64_2LD-NEXT: [[TMP3:%.*]] = load 
i64, i64* [[TMP1]] ; X64_2LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]] ; X64_2LD-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]] -; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 3 +; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 3 ; X64_2LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i64* -; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 3 +; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 3 ; X64_2LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i64* ; X64_2LD-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP7]] ; X64_2LD-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP9]] @@ -974,9 +974,9 @@ ; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]] ; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]] ; X64_1LD: loadbb1: -; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 8 +; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 8 ; X64_1LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32* -; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 8 +; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 8 ; X64_1LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32* ; X64_1LD-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP7]] ; X64_1LD-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP9]] @@ -994,9 +994,9 @@ ; X64_2LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]] ; X64_2LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]] ; X64_2LD-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]] -; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 8 +; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 8 ; X64_2LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i32* -; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 8 +; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 8 ; X64_2LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32* ; X64_2LD-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP7]] ; X64_2LD-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP9]] @@ -1035,9 +1035,9 @@ ; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]] ; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]] ; X64_1LD: loadbb1: -; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 5 +; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 5 ; X64_1LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i64* -; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 5 +; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 5 ; X64_1LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i64* ; X64_1LD-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP7]] ; X64_1LD-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP9]] @@ -1055,9 +1055,9 @@ ; X64_2LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]] ; X64_2LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]] ; X64_2LD-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]] -; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 5 +; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 5 ; X64_2LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i64* -; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 5 +; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 5 ; X64_2LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i64* ; X64_2LD-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP7]] ; X64_2LD-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP9]] @@ -1094,9 +1094,9 @@ ; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]] ; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]] ; X64_1LD: loadbb1: -; 
X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 6 +; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 6 ; X64_1LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i64* -; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 6 +; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 6 ; X64_1LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i64* ; X64_1LD-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP7]] ; X64_1LD-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP9]] @@ -1114,9 +1114,9 @@ ; X64_2LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]] ; X64_2LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]] ; X64_2LD-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]] -; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 6 +; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 6 ; X64_2LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i64* -; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 6 +; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 6 ; X64_2LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i64* ; X64_2LD-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP7]] ; X64_2LD-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP9]] @@ -1153,9 +1153,9 @@ ; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i64 [[TMP3]], [[TMP4]] ; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]] ; X64_1LD: loadbb1: -; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 7 +; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 7 ; X64_1LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i64* -; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 7 +; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 7 ; X64_1LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i64* ; X64_1LD-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP7]] ; X64_1LD-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP9]] @@ -1173,9 +1173,9 @@ ; X64_2LD-NEXT: [[TMP3:%.*]] = load i64, i64* [[TMP1]] ; X64_2LD-NEXT: [[TMP4:%.*]] = load i64, i64* [[TMP2]] ; X64_2LD-NEXT: [[TMP5:%.*]] = xor i64 [[TMP3]], [[TMP4]] -; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 7 +; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i64 7 ; X64_2LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i64* -; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 7 +; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i64 7 ; X64_2LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i64* ; X64_2LD-NEXT: [[TMP10:%.*]] = load i64, i64* [[TMP7]] ; X64_2LD-NEXT: [[TMP11:%.*]] = load i64, i64* [[TMP9]]
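A note on the ExpandMemCmp test updates above: the constant-offset GEPs now use an i64 index instead of i8. GEP indices are sign-extended to the pointer's index width, so an i8 index can only express offsets in [-128, 127]; with up to four loads per block the expansion emits byte offsets such as 128 or 192, which do not fit. A minimal sketch of the difference (hypothetical functions, not part of the patch):

; With an i64 index the byte offset is represented exactly.
define i8 @load_at_offset_192(i8* %p) {
  %q = getelementptr i8, i8* %p, i64 192
  %v = load i8, i8* %q
  ret i8 %v
}

; With an i8 index the same constant wraps: i8 192 sign-extends to -64,
; so this GEP addresses p-64 rather than p+192.
define i8 @load_at_offset_192_wrapped(i8* %p) {
  %q = getelementptr i8, i8* %p, i8 192
  %v = load i8, i8* %q
  ret i8 %v
}

Even for the small offsets exercised in the checks above (2, 4, 8), the pointer-width i64 index is the canonical form, which is why every hunk in this test flips i8 to i64.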