diff --git a/llvm/test/CodeGen/X86/cmp-shiftX-maskX.ll b/llvm/test/CodeGen/X86/cmp-shiftX-maskX.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/X86/cmp-shiftX-maskX.ll
@@ -0,0 +1,458 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=X64
+
+define i1 @shr_to_shl_eq_i8_s2(i8 %x) {
+; X86-LABEL: shr_to_shl_eq_i8_s2:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andb $63, %cl
+; X86-NEXT: shrb $2, %al
+; X86-NEXT: cmpb %al, %cl
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: shr_to_shl_eq_i8_s2:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andb $63, %al
+; X64-NEXT: shrb $2, %dil
+; X64-NEXT: cmpb %dil, %al
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+  %and = and i8 %x, 63
+  %shr = lshr i8 %x, 2
+  %r = icmp eq i8 %and, %shr
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_ne_i8_s7(i8 %x) {
+; X86-LABEL: shl_to_shr_ne_i8_s7:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: shlb $7, %cl
+; X86-NEXT: andb $-128, %al
+; X86-NEXT: cmpb %al, %cl
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_to_shr_ne_i8_s7:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shlb $7, %al
+; X64-NEXT: andb $-128, %dil
+; X64-NEXT: cmpb %dil, %al
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+  %shl = shl i8 %x, 7
+  %and = and i8 %x, 128
+  %r = icmp ne i8 %shl, %and
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_eq_i8_s1(i8 %x) {
+; X86-LABEL: shr_to_shl_eq_i8_s1:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andb $127, %cl
+; X86-NEXT: shrb %al
+; X86-NEXT: cmpb %al, %cl
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: shr_to_shl_eq_i8_s1:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andb $127, %al
+; X64-NEXT: shrb %dil
+; X64-NEXT: cmpb %dil, %al
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+  %and = and i8 %x, 127
+  %shr = lshr i8 %x, 1
+  %r = icmp eq i8 %and, %shr
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_eq_i32_s3(i32 %x) {
+; X86-LABEL: shr_to_shl_eq_i32_s3:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $536870911, %ecx # imm = 0x1FFFFFFF
+; X86-NEXT: shrl $3, %eax
+; X86-NEXT: cmpl %eax, %ecx
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: shr_to_shl_eq_i32_s3:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $536870911, %eax # imm = 0x1FFFFFFF
+; X64-NEXT: shrl $3, %edi
+; X64-NEXT: cmpl %edi, %eax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+  %and = and i32 %x, 536870911
+  %shr = lshr i32 %x, 3
+  %r = icmp eq i32 %and, %shr
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_eq_i32_s3_fail(i32 %x) {
+; X86-LABEL: shl_to_shr_eq_i32_s3_fail:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $536870911, %ecx # imm = 0x1FFFFFFF
+; X86-NEXT: shll $3, %eax
+; X86-NEXT: cmpl %eax, %ecx
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_to_shr_eq_i32_s3_fail:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $536870911, %eax # imm = 0x1FFFFFFF
+; X64-NEXT: shll $3, %edi
+; X64-NEXT: cmpl %edi, %eax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+  %and = and i32 %x, 536870911
+  %shr = shl i32 %x, 3
+  %r = icmp eq i32 %and, %shr
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_ne_i32_s16(i32 %x) {
+; X86-LABEL: shl_to_shr_ne_i32_s16:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: shll $16, %ecx
+; X86-NEXT: andl $-65536, %eax # imm = 0xFFFF0000
+; X86-NEXT: cmpl %eax, %ecx
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_to_shr_ne_i32_s16:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll $16, %eax
+; X64-NEXT: andl $-65536, %edi # imm = 0xFFFF0000
+; X64-NEXT: cmpl %edi, %eax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+  %shl = shl i32 %x, 16
+  %and = and i32 %x, 4294901760
+  %r = icmp ne i32 %shl, %and
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_ne_i32_s16_fail(i32 %x) {
+; X86-LABEL: shl_to_shr_ne_i32_s16_fail:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: shll $16, %ecx
+; X86-NEXT: andl $2147450880, %eax # imm = 0x7FFF8000
+; X86-NEXT: cmpl %eax, %ecx
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_to_shr_ne_i32_s16_fail:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll $16, %eax
+; X64-NEXT: andl $2147450880, %edi # imm = 0x7FFF8000
+; X64-NEXT: cmpl %edi, %eax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+  %shl = shl i32 %x, 16
+  %and = and i32 %x, 2147450880
+  %r = icmp ne i32 %shl, %and
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_eq_i16_s1(i16 %x) {
+; X86-LABEL: shr_to_shl_eq_i16_s1:
+; X86: # %bb.0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $32767, %ecx # imm = 0x7FFF
+; X86-NEXT: shrl %eax
+; X86-NEXT: cmpw %ax, %cx
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: shr_to_shl_eq_i16_s1:
+; X64: # %bb.0:
+; X64-NEXT: movzwl %di, %eax
+; X64-NEXT: andl $32767, %edi # imm = 0x7FFF
+; X64-NEXT: shrl %eax
+; X64-NEXT: cmpw %ax, %di
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+  %and = and i16 %x, 32767
+  %shr = lshr i16 %x, 1
+  %r = icmp eq i16 %and, %shr
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_eq_i16_s1_fail(i16 %x) {
+; X86-LABEL: shr_to_shl_eq_i16_s1_fail:
+; X86: # %bb.0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $32766, %ecx # imm = 0x7FFE
+; X86-NEXT: shrl %eax
+; X86-NEXT: cmpw %ax, %cx
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: shr_to_shl_eq_i16_s1_fail:
+; X64: # %bb.0:
+; X64-NEXT: movzwl %di, %eax
+; X64-NEXT: andl $32766, %edi # imm = 0x7FFE
+; X64-NEXT: shrl %eax
+; X64-NEXT: cmpw %ax, %di
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+  %and = and i16 %x, 32766
+  %shr = lshr i16 %x, 1
+  %r = icmp eq i16 %and, %shr
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_eq_i64_s44(i64 %x) {
+; X86-LABEL: shl_to_shr_eq_i64_s44:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $12, %eax
+; X86-NEXT: movl $-4096, %ecx # imm = 0xF000
+; X86-NEXT: andl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: xorl %eax, %ecx
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_to_shr_eq_i64_s44:
+; X64: # %bb.0:
+; X64-NEXT: movabsq $-17592186044416, %rax # imm = 0xFFFFF00000000000
+; X64-NEXT: andq %rdi, %rax
+; X64-NEXT: shlq $44, %rdi
+; X64-NEXT: cmpq %rax, %rdi
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+  %shl = shl i64 %x, 44
+  %and = and i64 %x, 18446726481523507200
+  %r = icmp eq i64 %shl, %and
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_ne_i64_s32(i64 %x) {
+; X86-LABEL: shr_to_shl_ne_i64_s32:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: shr_to_shl_ne_i64_s32:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shrq $32, %rdi
+; X64-NEXT: cmpq %rdi, %rax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+  %and = and i64 %x, 4294967295
+  %shr = lshr i64 %x, 32
+  %r = icmp ne i64 %and, %shr
+  ret i1 %r
+}
+
+define i1 @ashr_to_shl_ne_i64_s32_fail(i64 %x) {
+; X86-LABEL: ashr_to_shl_ne_i64_s32_fail:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: sarl $31, %ecx
+; X86-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl %ecx, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: ashr_to_shl_ne_i64_s32_fail:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: sarq $32, %rdi
+; X64-NEXT: cmpq %rdi, %rax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+  %and = and i64 %x, 4294967295
+  %shr = ashr i64 %x, 32
+  %r = icmp ne i64 %and, %shr
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_eq_i64_s63(i64 %x) {
+; X86-LABEL: shl_to_shr_eq_i64_s63:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $31, %eax
+; X86-NEXT: movl $-2147483648, %ecx # imm = 0x80000000
+; X86-NEXT: andl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: xorl %eax, %ecx
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_to_shr_eq_i64_s63:
+; X64: # %bb.0:
+; X64-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
+; X64-NEXT: andq %rdi, %rax
+; X64-NEXT: shlq $63, %rdi
+; X64-NEXT: cmpq %rax, %rdi
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+  %shl = shl i64 %x, 63
+  %and = and i64 %x, 9223372036854775808
+  %r = icmp eq i64 %shl, %and
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_eq_i64_s63_fail(i64 %x) {
+; X86-LABEL: shl_to_shr_eq_i64_s63_fail:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $31, %eax
+; X86-NEXT: movl $-2147483648, %ecx # imm = 0x80000000
+; X86-NEXT: andl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: cmpl %ecx, %eax
+; X86-NEXT: seta %al
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_to_shr_eq_i64_s63_fail:
+; X64: # %bb.0:
+; X64-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000
+; X64-NEXT: andq %rdi, %rax
+; X64-NEXT: shlq $63, %rdi
+; X64-NEXT: cmpq %rax, %rdi
+; X64-NEXT: seta %al
+; X64-NEXT: retq
+  %shl = shl i64 %x, 63
+  %and = and i64 %x, 9223372036854775808
+  %r = icmp ugt i64 %shl, %and
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_eq_i64_s7(i64 %x) {
+; X86-LABEL: shr_to_shl_eq_i64_s7:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .cfi_offset %esi, -8
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: andl $33554431, %edx # imm = 0x1FFFFFF
+; X86-NEXT: movl %ecx, %esi
+; X86-NEXT: shldl $25, %eax, %esi
+; X86-NEXT: shrl $7, %ecx
+; X86-NEXT: xorl %edx, %ecx
+; X86-NEXT: xorl %eax, %esi
+; X86-NEXT: orl %ecx, %esi
+; X86-NEXT: sete %al
+; X86-NEXT: popl %esi
+; X86-NEXT: .cfi_def_cfa_offset 4
+; X86-NEXT: retl
+;
+; X64-LABEL: shr_to_shl_eq_i64_s7:
+; X64: # %bb.0:
+; X64-NEXT: movabsq $144115188075855871, %rax # imm = 0x1FFFFFFFFFFFFFF
+; X64-NEXT: andq %rdi, %rax
+; X64-NEXT: shrq $7, %rdi
+; X64-NEXT: cmpq %rdi, %rax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+  %and = and i64 %x, 144115188075855871
+  %shr = lshr i64 %x, 7
+  %r = icmp eq i64 %and, %shr
+  ret i1 %r
+}
+
+define i1 @shl_to_shr_ne_i32_s24(i32 %x) {
+; X86-LABEL: shl_to_shr_ne_i32_s24:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: shll $24, %ecx
+; X86-NEXT: andl $-16777216, %eax # imm = 0xFF000000
+; X86-NEXT: cmpl %eax, %ecx
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: shl_to_shr_ne_i32_s24:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll $24, %eax
+; X64-NEXT: andl $-16777216, %edi # imm = 0xFF000000
+; X64-NEXT: cmpl %edi, %eax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+  %shl = shl i32 %x, 24
+  %and = and i32 %x, 4278190080
+  %r = icmp ne i32 %shl, %and
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_ne_i32_s24_fail(i32 %x) {
+; X86-LABEL: shr_to_shl_ne_i32_s24_fail:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: shrl $24, %ecx
+; X86-NEXT: andl $-16777216, %eax # imm = 0xFF000000
+; X86-NEXT: cmpl %eax, %ecx
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: shr_to_shl_ne_i32_s24_fail:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shrl $24, %eax
+; X64-NEXT: andl $-16777216, %edi # imm = 0xFF000000
+; X64-NEXT: cmpl %edi, %eax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+  %shl = lshr i32 %x, 24
+  %and = and i32 %x, 4278190080
+  %r = icmp ne i32 %shl, %and
+  ret i1 %r
+}
+
+define i1 @shr_to_shl_ne_i32_s8(i32 %x) {
+; X86-LABEL: shr_to_shl_ne_i32_s8:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: andl $16777215, %ecx # imm = 0xFFFFFF
+; X86-NEXT: shrl $8, %eax
+; X86-NEXT: cmpl %eax, %ecx
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: shr_to_shl_ne_i32_s8:
+; X64: # %bb.0:
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: andl $16777215, %eax # imm = 0xFFFFFF
+; X64-NEXT: shrl $8, %edi
+; X64-NEXT: cmpl %edi, %eax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+  %and = and i32 %x, 16777215
+  %shr = lshr i32 %x, 8
+  %r = icmp ne i32 %and, %shr
+  ret i1 %r
+}