diff --git a/llvm/lib/Target/X86/X86.td b/llvm/lib/Target/X86/X86.td
--- a/llvm/lib/Target/X86/X86.td
+++ b/llvm/lib/Target/X86/X86.td
@@ -1260,6 +1260,7 @@
                                           FeatureNOPL,
                                           Feature64Bit,
                                           FeatureSlow3OpsLEA,
+                                          FeatureSlowDivide64,
                                           FeatureSlowIncDec,
                                           FeatureMacroFusion,
                                           FeatureInsertVZEROUPPER
diff --git a/llvm/test/CodeGen/X86/bypass-slow-division-tune.ll b/llvm/test/CodeGen/X86/bypass-slow-division-tune.ll
--- a/llvm/test/CodeGen/X86/bypass-slow-division-tune.ll
+++ b/llvm/test/CodeGen/X86/bypass-slow-division-tune.ll
@@ -66,9 +66,20 @@
 ; X64-LABEL: div64:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movq %rdi, %rcx
+; X64-NEXT:    orq %rsi, %rcx
+; X64-NEXT:    shrq $32, %rcx
+; X64-NEXT:    je .LBB1_1
+; X64-NEXT:  # %bb.2:
 ; X64-NEXT:    cqto
 ; X64-NEXT:    idivq %rsi
 ; X64-NEXT:    retq
+; X64-NEXT:  .LBB1_1:
+; X64-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    divl %esi
+; X64-NEXT:    # kill: def $eax killed $eax def $rax
+; X64-NEXT:    retq
 ;
 ; SLM-LABEL: div64:
 ; SLM:       # %bb.0: # %entry
@@ -178,9 +189,20 @@
 ; X64-LABEL: div64_hugews:
 ; X64:       # %bb.0:
 ; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    movq %rdi, %rcx
+; X64-NEXT:    orq %rsi, %rcx
+; X64-NEXT:    shrq $32, %rcx
+; X64-NEXT:    je .LBB4_1
+; X64-NEXT:  # %bb.2:
 ; X64-NEXT:    cqto
 ; X64-NEXT:    idivq %rsi
 ; X64-NEXT:    retq
+; X64-NEXT:  .LBB4_1:
+; X64-NEXT:    # kill: def $eax killed $eax killed $rax
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    divl %esi
+; X64-NEXT:    # kill: def $eax killed $eax def $rax
+; X64-NEXT:    retq
 ;
 ; SLM-LABEL: div64_hugews:
 ; SLM:       # %bb.0:
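
Note on the updated X64 check lines: with FeatureSlowDivide64 enabled, the backend guards the 64-bit idivq with a runtime check (orq + shrq $32 + je) and falls back to a 32-bit divl when both operands fit in 32 bits. A minimal C sketch of that bypass, with an illustrative function name not taken from the patch:

    #include <stdint.h>

    /* Illustrative only: mirrors the shape of the code now emitted for a
     * 64-bit sdiv when FeatureSlowDivide64 is set. If the upper 32 bits of
     * both operands are zero (the orq/shrq $32/je sequence above), both
     * values are small non-negative numbers and a cheap 32-bit unsigned
     * divide gives the same result; otherwise take the full idivq path. */
    static int64_t div64_bypass(int64_t a, int64_t b) {
        if ((((uint64_t)a | (uint64_t)b) >> 32) == 0) {
            /* fast path: 32-bit divl with zeroed high half (xorl %edx,%edx) */
            return (int64_t)((uint32_t)a / (uint32_t)b);
        }
        return a / b; /* slow path: 64-bit signed division */
    }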