diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp --- a/llvm/lib/Target/X86/X86MCInstLower.cpp +++ b/llvm/lib/Target/X86/X86MCInstLower.cpp @@ -982,6 +982,15 @@ X86II::isX86_64ExtendedReg(OutMI.getOperand(2).getReg())) std::swap(OutMI.getOperand(1), OutMI.getOperand(2)); } + // Add a REP prefix to BSF instructions so that new processors can + // recognize it as TZCNT, which has better performance than BSF. + if (X86::isBSF(OutMI.getOpcode()) && !MF.getFunction().hasOptSize()) { + // BSF and TZCNT have different interpretations on ZF bit. So make sure + // it won't be used later. + const MachineOperand *FlagDef = MI->findRegisterDefOperand(X86::EFLAGS); + if (FlagDef && FlagDef->isDead()) + OutMI.setFlags(X86::IP_HAS_REPEAT); + } break; } } diff --git a/llvm/test/CodeGen/X86/clz.ll b/llvm/test/CodeGen/X86/clz.ll --- a/llvm/test/CodeGen/X86/clz.ll +++ b/llvm/test/CodeGen/X86/clz.ll @@ -18,13 +18,13 @@ define i8 @cttz_i8(i8 %x) { ; X86-LABEL: cttz_i8: ; X86: # %bb.0: -; X86-NEXT: bsfl {{[0-9]+}}(%esp), %eax +; X86-NEXT: rep bsfl {{[0-9]+}}(%esp), %eax ; X86-NEXT: # kill: def $al killed $al killed $eax ; X86-NEXT: retl ; ; X64-LABEL: cttz_i8: ; X64: # %bb.0: -; X64-NEXT: bsfl %edi, %eax +; X64-NEXT: rep bsfl %edi, %eax ; X64-NEXT: # kill: def $al killed $al killed $eax ; X64-NEXT: retq ; @@ -46,12 +46,12 @@ define i16 @cttz_i16(i16 %x) { ; X86-LABEL: cttz_i16: ; X86: # %bb.0: -; X86-NEXT: bsfw {{[0-9]+}}(%esp), %ax +; X86-NEXT: rep bsfw {{[0-9]+}}(%esp), %ax ; X86-NEXT: retl ; ; X64-LABEL: cttz_i16: ; X64: # %bb.0: -; X64-NEXT: bsfw %di, %ax +; X64-NEXT: rep bsfw %di, %ax ; X64-NEXT: retq ; ; X86-CLZ-LABEL: cttz_i16: @@ -72,12 +72,12 @@ define i32 @cttz_i32(i32 %x) { ; X86-LABEL: cttz_i32: ; X86: # %bb.0: -; X86-NEXT: bsfl {{[0-9]+}}(%esp), %eax +; X86-NEXT: rep bsfl {{[0-9]+}}(%esp), %eax ; X86-NEXT: retl ; ; X64-LABEL: cttz_i32: ; X64: # %bb.0: -; X64-NEXT: bsfl %edi, %eax +; X64-NEXT: rep bsfl %edi, %eax ; X64-NEXT: retq ; ; 
X86-CLZ-LABEL: cttz_i32: @@ -100,20 +100,20 @@ ; X86-NOCMOV-NEXT: testl %eax, %eax ; X86-NOCMOV-NEXT: jne .LBB3_1 ; X86-NOCMOV-NEXT: # %bb.2: -; X86-NOCMOV-NEXT: bsfl {{[0-9]+}}(%esp), %eax +; X86-NOCMOV-NEXT: rep bsfl {{[0-9]+}}(%esp), %eax ; X86-NOCMOV-NEXT: addl $32, %eax ; X86-NOCMOV-NEXT: xorl %edx, %edx ; X86-NOCMOV-NEXT: retl ; X86-NOCMOV-NEXT: .LBB3_1: -; X86-NOCMOV-NEXT: bsfl %eax, %eax +; X86-NOCMOV-NEXT: rep bsfl %eax, %eax ; X86-NOCMOV-NEXT: xorl %edx, %edx ; X86-NOCMOV-NEXT: retl ; ; X86-CMOV-LABEL: cttz_i64: ; X86-CMOV: # %bb.0: ; X86-CMOV-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-CMOV-NEXT: bsfl %ecx, %edx -; X86-CMOV-NEXT: bsfl {{[0-9]+}}(%esp), %eax +; X86-CMOV-NEXT: rep bsfl %ecx, %edx +; X86-CMOV-NEXT: rep bsfl {{[0-9]+}}(%esp), %eax ; X86-CMOV-NEXT: addl $32, %eax ; X86-CMOV-NEXT: testl %ecx, %ecx ; X86-CMOV-NEXT: cmovnel %edx, %eax @@ -122,7 +122,7 @@ ; ; X64-LABEL: cttz_i64: ; X64: # %bb.0: -; X64-NEXT: bsfq %rdi, %rax +; X64-NEXT: rep bsfq %rdi, %rax ; X64-NEXT: retq ; ; X86-CLZ-LABEL: cttz_i64: @@ -517,7 +517,7 @@ ; X86-NEXT: je .LBB12_1 ; X86-NEXT: # %bb.2: # %cond.false ; X86-NEXT: movzbl %al, %eax -; X86-NEXT: bsfl %eax, %eax +; X86-NEXT: rep bsfl %eax, %eax ; X86-NEXT: # kill: def $al killed $al killed $eax ; X86-NEXT: retl ; X86-NEXT: .LBB12_1: @@ -531,7 +531,7 @@ ; X64-NEXT: je .LBB12_1 ; X64-NEXT: # %bb.2: # %cond.false ; X64-NEXT: movzbl %dil, %eax -; X64-NEXT: bsfl %eax, %eax +; X64-NEXT: rep bsfl %eax, %eax ; X64-NEXT: # kill: def $al killed $al killed $eax ; X64-NEXT: retq ; X64-NEXT: .LBB12_1: @@ -565,7 +565,7 @@ ; X86-NEXT: testw %ax, %ax ; X86-NEXT: je .LBB13_1 ; X86-NEXT: # %bb.2: # %cond.false -; X86-NEXT: bsfw %ax, %ax +; X86-NEXT: rep bsfw %ax, %ax ; X86-NEXT: retl ; X86-NEXT: .LBB13_1: ; X86-NEXT: movw $16, %ax @@ -576,7 +576,7 @@ ; X64-NEXT: testw %di, %di ; X64-NEXT: je .LBB13_1 ; X64-NEXT: # %bb.2: # %cond.false -; X64-NEXT: bsfw %di, %ax +; X64-NEXT: rep bsfw %di, %ax ; X64-NEXT: retq ; X64-NEXT: .LBB13_1: ; X64-NEXT: 
movw $16, %ax @@ -603,7 +603,7 @@ ; X86-NEXT: testl %eax, %eax ; X86-NEXT: je .LBB14_1 ; X86-NEXT: # %bb.2: # %cond.false -; X86-NEXT: bsfl %eax, %eax +; X86-NEXT: rep bsfl %eax, %eax ; X86-NEXT: retl ; X86-NEXT: .LBB14_1: ; X86-NEXT: movl $32, %eax @@ -614,7 +614,7 @@ ; X64-NEXT: testl %edi, %edi ; X64-NEXT: je .LBB14_1 ; X64-NEXT: # %bb.2: # %cond.false -; X64-NEXT: bsfl %edi, %eax +; X64-NEXT: rep bsfl %edi, %eax ; X64-NEXT: retq ; X64-NEXT: .LBB14_1: ; X64-NEXT: movl $32, %eax @@ -638,6 +638,7 @@ ; X86-NOCMOV-LABEL: cttz_i64_zero_test: ; X86-NOCMOV: # %bb.0: ; X86-NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NOCMOV-NOT: rep ; X86-NOCMOV-NEXT: bsfl {{[0-9]+}}(%esp), %edx ; X86-NOCMOV-NEXT: movl $32, %eax ; X86-NOCMOV-NEXT: je .LBB15_2 @@ -651,17 +652,19 @@ ; X86-NOCMOV-NEXT: xorl %edx, %edx ; X86-NOCMOV-NEXT: retl ; X86-NOCMOV-NEXT: .LBB15_3: -; X86-NOCMOV-NEXT: bsfl %ecx, %eax +; X86-NOCMOV-NEXT: rep bsfl %ecx, %eax ; X86-NOCMOV-NEXT: xorl %edx, %edx ; X86-NOCMOV-NEXT: retl ; ; X86-CMOV-LABEL: cttz_i64_zero_test: ; X86-CMOV: # %bb.0: ; X86-CMOV-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-CMOV-NOT: rep ; X86-CMOV-NEXT: bsfl {{[0-9]+}}(%esp), %ecx ; X86-CMOV-NEXT: movl $32, %edx ; X86-CMOV-NEXT: cmovnel %ecx, %edx ; X86-CMOV-NEXT: addl $32, %edx +; X86-CMOV-NOT: rep ; X86-CMOV-NEXT: bsfl %eax, %eax ; X86-CMOV-NEXT: cmovel %edx, %eax ; X86-CMOV-NEXT: xorl %edx, %edx @@ -672,7 +675,7 @@ ; X64-NEXT: testq %rdi, %rdi ; X64-NEXT: je .LBB15_1 ; X64-NEXT: # %bb.2: # %cond.false -; X64-NEXT: bsfq %rdi, %rax +; X64-NEXT: rep bsfq %rdi, %rax ; X64-NEXT: retq ; X64-NEXT: .LBB15_1: ; X64-NEXT: movl $64, %eax @@ -822,7 +825,7 @@ ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax ; X86-NEXT: orb $2, %al ; X86-NEXT: movzbl %al, %eax -; X86-NEXT: bsfl %eax, %eax +; X86-NEXT: rep bsfl %eax, %eax ; X86-NEXT: # kill: def $al killed $al killed $eax ; X86-NEXT: retl ; @@ -830,7 +833,7 @@ ; X64: # %bb.0: ; X64-NEXT: orb $2, %dil ; X64-NEXT: movzbl %dil, %eax -; X64-NEXT: bsfl %eax, %eax +; 
X64-NEXT: rep bsfl %eax, %eax ; X64-NEXT: # kill: def $al killed $al killed $eax ; X64-NEXT: retq ; @@ -983,12 +986,12 @@ ; X86-NOCMOV-NEXT: # %bb.2: ; X86-NOCMOV-NEXT: movl $-2147483648, %eax # imm = 0x80000000 ; X86-NOCMOV-NEXT: orl {{[0-9]+}}(%esp), %eax -; X86-NOCMOV-NEXT: bsfl %eax, %eax +; X86-NOCMOV-NEXT: rep bsfl %eax, %eax ; X86-NOCMOV-NEXT: orl $32, %eax ; X86-NOCMOV-NEXT: xorl %edx, %edx ; X86-NOCMOV-NEXT: retl ; X86-NOCMOV-NEXT: .LBB22_1: -; X86-NOCMOV-NEXT: bsfl %eax, %eax +; X86-NOCMOV-NEXT: rep bsfl %eax, %eax ; X86-NOCMOV-NEXT: xorl %edx, %edx ; X86-NOCMOV-NEXT: retl ; @@ -997,8 +1000,8 @@ ; X86-CMOV-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-CMOV-NEXT: movl $-2147483648, %eax # imm = 0x80000000 ; X86-CMOV-NEXT: orl {{[0-9]+}}(%esp), %eax -; X86-CMOV-NEXT: bsfl %ecx, %edx -; X86-CMOV-NEXT: bsfl %eax, %eax +; X86-CMOV-NEXT: rep bsfl %ecx, %edx +; X86-CMOV-NEXT: rep bsfl %eax, %eax ; X86-CMOV-NEXT: orl $32, %eax ; X86-CMOV-NEXT: testl %ecx, %ecx ; X86-CMOV-NEXT: cmovnel %edx, %eax @@ -1009,7 +1012,7 @@ ; X64: # %bb.0: ; X64-NEXT: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000 ; X64-NEXT: orq %rdi, %rax -; X64-NEXT: bsfq %rax, %rax +; X64-NEXT: rep bsfq %rax, %rax ; X64-NEXT: retq ; ; X86-CLZ-LABEL: cttz_i64_zero_test_knownneverzero: @@ -1110,3 +1113,55 @@ %sext = sext i8 %load to i32 ret i32 %sext } + +define i32 @cttz_i32_osize(i32 %x) optsize { +; X86-LABEL: cttz_i32_osize: +; X86: # %bb.0: +; X86-NOT: rep +; X86-NEXT: bsfl {{[0-9]+}}(%esp), %eax +; X86-NEXT: retl +; +; X64-LABEL: cttz_i32_osize: +; X64: # %bb.0: +; X64-NOT: rep +; X64-NEXT: bsfl %edi, %eax +; X64-NEXT: retq +; +; X86-CLZ-LABEL: cttz_i32_osize: +; X86-CLZ: # %bb.0: +; X86-CLZ-NEXT: tzcntl {{[0-9]+}}(%esp), %eax +; X86-CLZ-NEXT: retl +; +; X64-CLZ-LABEL: cttz_i32_osize: +; X64-CLZ: # %bb.0: +; X64-CLZ-NEXT: tzcntl %edi, %eax +; X64-CLZ-NEXT: retq + %tmp = call i32 @llvm.cttz.i32( i32 %x, i1 true) + ret i32 %tmp +} + +define i32 @cttz_i32_msize(i32 %x) minsize { +; 
X86-LABEL: cttz_i32_msize: +; X86: # %bb.0: +; X86-NOT: rep +; X86-NEXT: bsfl {{[0-9]+}}(%esp), %eax +; X86-NEXT: retl +; +; X64-LABEL: cttz_i32_msize: +; X64: # %bb.0: +; X64-NOT: rep +; X64-NEXT: bsfl %edi, %eax +; X64-NEXT: retq +; +; X86-CLZ-LABEL: cttz_i32_msize: +; X86-CLZ: # %bb.0: +; X86-CLZ-NEXT: tzcntl {{[0-9]+}}(%esp), %eax +; X86-CLZ-NEXT: retl +; +; X64-CLZ-LABEL: cttz_i32_msize: +; X64-CLZ: # %bb.0: +; X64-CLZ-NEXT: tzcntl %edi, %eax +; X64-CLZ-NEXT: retq + %tmp = call i32 @llvm.cttz.i32( i32 %x, i1 true) + ret i32 %tmp +} diff --git a/llvm/test/CodeGen/X86/peephole-na-phys-copy-folding.ll b/llvm/test/CodeGen/X86/peephole-na-phys-copy-folding.ll --- a/llvm/test/CodeGen/X86/peephole-na-phys-copy-folding.ll +++ b/llvm/test/CodeGen/X86/peephole-na-phys-copy-folding.ll @@ -353,6 +353,7 @@ ; CHECK32-NEXT: testl %edx, %edx ; CHECK32-NEXT: setg %al ; CHECK32-NEXT: #APP +; CHECK32-NOT: rep ; CHECK32-NEXT: bsfl %edx, %edx ; CHECK32-NEXT: #NO_APP ; CHECK32-NEXT: movl %edx, (%ecx) @@ -364,6 +365,7 @@ ; CHECK64-NEXT: testl %ecx, %ecx ; CHECK64-NEXT: setg %al ; CHECK64-NEXT: #APP +; CHECK64-NOT: rep ; CHECK64-NEXT: bsfl %ecx, %ecx ; CHECK64-NEXT: #NO_APP ; CHECK64-NEXT: movl %ecx, (%rdi) diff --git a/llvm/test/CodeGen/X86/stack-folding-x86_64.ll b/llvm/test/CodeGen/X86/stack-folding-x86_64.ll --- a/llvm/test/CodeGen/X86/stack-folding-x86_64.ll +++ b/llvm/test/CodeGen/X86/stack-folding-x86_64.ll @@ -37,7 +37,7 @@ ; CHECK-NEXT: #APP ; CHECK-NEXT: nop ; CHECK-NEXT: #NO_APP -; CHECK-NEXT: bsfl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Folded Reload +; CHECK-NEXT: rep bsfl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Folded Reload ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: .cfi_def_cfa_offset 48 ; CHECK-NEXT: popq %r12 @@ -82,7 +82,7 @@ ; CHECK-NEXT: #APP ; CHECK-NEXT: nop ; CHECK-NEXT: #NO_APP -; CHECK-NEXT: bsfq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload +; CHECK-NEXT: rep bsfq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload ; CHECK-NEXT: popq %rbx ; CHECK-NEXT: 
.cfi_def_cfa_offset 48 ; CHECK-NEXT: popq %r12