Index: test/CodeGen/X86/lea-opt-cse1.ll
===================================================================
--- /dev/null
+++ test/CodeGen/X86/lea-opt-cse1.ll
@@ -0,0 +1,48 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s -check-prefix=X86
+
+%struct.SA = type { i32, i32, i32, i32, i32 }
+
+define void @test_func(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr {
+; X64-LABEL: test_func:
+; X64: # BB#0: # %entry
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: movl 16(%rdi), %ecx
+; X64-NEXT: leal (%rax,%rcx), %edx
+; X64-NEXT: leal 1(%rax,%rcx), %eax
+; X64-NEXT: movl %eax, 12(%rdi)
+; X64-NEXT: leal 1(%rcx,%rdx), %eax
+; X64-NEXT: movl %eax, 16(%rdi)
+; X64-NEXT: retq
+;
+; X86-LABEL: test_func:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %esi
+; X86-NEXT: .Lcfi0:
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: .Lcfi1:
+; X86-NEXT: .cfi_offset %esi, -8
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %ecx
+; X86-NEXT: movl 16(%eax), %edx
+; X86-NEXT: leal 1(%ecx,%edx), %esi
+; X86-NEXT: addl %edx, %ecx
+; X86-NEXT: movl %esi, 12(%eax)
+; X86-NEXT: leal 1(%edx,%ecx), %ecx
+; X86-NEXT: movl %ecx, 16(%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+entry:
+  %h0 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 0
+  %0 = load i32, i32* %h0, align 8
+  %h3 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 3
+  %h4 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 4
+  %1 = load i32, i32* %h4, align 8
+  %add = add i32 %0, 1
+  %add4 = add i32 %add, %1
+  store i32 %add4, i32* %h3, align 4
+  %add29 = add i32 %add4, %1
+  store i32 %add29, i32* %h4, align 8
+  ret void
+}
Index: test/CodeGen/X86/lea-opt-cse2.ll
===================================================================
--- /dev/null
+++ test/CodeGen/X86/lea-opt-cse2.ll
@@ -0,0 +1,76 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s -check-prefix=X86
+%struct.SA = type { i32, i32, i32, i32, i32 }
+
+define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
+; X64-LABEL: foo:
+; X64: # BB#0: # %entry
+; X64-NEXT: .p2align 4, 0x90
+; X64-NEXT: .LBB0_1: # %loop
+; X64-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: movl 16(%rdi), %ecx
+; X64-NEXT: leal 1(%rax,%rcx), %edx
+; X64-NEXT: movl %edx, 12(%rdi)
+; X64-NEXT: decl %esi
+; X64-NEXT: jne .LBB0_1
+; X64-NEXT: # BB#2: # %exit
+; X64-NEXT: addl %ecx, %eax
+; X64-NEXT: leal 1(%rcx,%rax), %eax
+; X64-NEXT: movl %eax, 16(%rdi)
+; X64-NEXT: retq
+;
+; X86-LABEL: foo:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %edi
+; X86-NEXT: .Lcfi0:
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: pushl %esi
+; X86-NEXT: .Lcfi1:
+; X86-NEXT: .cfi_def_cfa_offset 12
+; X86-NEXT: .Lcfi2:
+; X86-NEXT: .cfi_offset %esi, -12
+; X86-NEXT: .Lcfi3:
+; X86-NEXT: .cfi_offset %edi, -8
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: .p2align 4, 0x90
+; X86-NEXT: .LBB0_1: # %loop
+; X86-NEXT: # =>This Inner Loop Header: Depth=1
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: movl 16(%eax), %esi
+; X86-NEXT: leal 1(%edx,%esi), %edi
+; X86-NEXT: movl %edi, 12(%eax)
+; X86-NEXT: decl %ecx
+; X86-NEXT: jne .LBB0_1
+; X86-NEXT: # BB#2: # %exit
+; X86-NEXT: addl %esi, %edx
+; X86-NEXT: leal 1(%esi,%edx), %ecx
+; X86-NEXT: movl %ecx, 16(%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl
+entry:
+  br label %loop
+
+loop:
+  %iter = phi i32 [ %n, %entry ], [ %iter.ctr, %loop ]
+  %h0 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 0
+  %0 = load i32, i32* %h0, align 8
+  %h3 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 3
+  %h4 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 4
+  %1 = load i32, i32* %h4, align 8
+  %add = add i32 %0, 1
+  %add4 = add i32 %add, %1
+  store i32 %add4, i32* %h3, align 4
+  %add29 = add i32 %add4, %1
+  %iter.ctr = sub i32 %iter, 1
+  %res = icmp ne i32 %iter.ctr, 0
+  br i1 %res, label %loop, label %exit
+
+exit:
+  store i32 %add29, i32* %h4, align 8
+  ret void
+}
Index: test/CodeGen/X86/lea-opt-cse3.ll
===================================================================
--- /dev/null
+++ test/CodeGen/X86/lea-opt-cse3.ll
@@ -0,0 +1,32 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s -check-prefix=X86
+
+define i32 @foo(i32 %a, i32 %b) local_unnamed_addr #0 {
+; X64-LABEL: foo:
+; X64: # BB#0: # %entry
+; X64-NEXT: # kill: %ESI %ESI %RSI
+; X64-NEXT: # kill: %EDI %EDI %RDI
+; X64-NEXT: leal 4(%rdi,%rsi,2), %ecx
+; X64-NEXT: leal 4(%rdi,%rsi,4), %eax
+; X64-NEXT: imull %ecx, %eax
+; X64-NEXT: retq
+;
+; X86-LABEL: foo:
+; X86: # BB#0: # %entry
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: leal 4(%ecx,%eax,2), %edx
+; X86-NEXT: leal 4(%ecx,%eax,4), %eax
+; X86-NEXT: imull %edx, %eax
+; X86-NEXT: retl
+entry:
+  %mul = shl i32 %b, 1
+  %add = add i32 %a, 4
+  %add1 = add i32 %add, %mul
+  %mul2 = shl i32 %b, 2
+  %add4 = add i32 %add, %mul2
+  %mul5 = mul nsw i32 %add1, %add4
+  ret i32 %mul5
+}
+