diff --git a/llvm/test/CodeGen/X86/pcsections-atomics.ll b/llvm/test/CodeGen/X86/pcsections-atomics.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pcsections-atomics.ll
@@ -0,0 +1,149 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+;
+; RUN: llc -O0 < %s | FileCheck %s --check-prefixes=O0
+; RUN: llc -O1 < %s | FileCheck %s --check-prefixes=O1
+; RUN: llc -O2 < %s | FileCheck %s --check-prefixes=O2
+; RUN: llc -O3 < %s | FileCheck %s --check-prefixes=O3
+
+target triple = "x86_64-unknown-linux-gnu"
+
+@foo = dso_local global i64 0, align 8
+@bar = dso_local global i64 0, align 8
+
+define i64 @test_simple_atomic() {
+; O0-LABEL: test_simple_atomic:
+; O0:       # %bb.0: # %entry
+; O0-NEXT:    .Lpcsection0:
+; O0-NEXT:    movq foo(%rip), %rax
+; O0-NEXT:    addq bar, %rax
+; O0-NEXT:    retq
+;
+; O1-LABEL: test_simple_atomic:
+; O1:       # %bb.0: # %entry
+; O1-NEXT:    .Lpcsection0:
+; O1-NEXT:    movq foo(%rip), %rax
+; O1-NEXT:    addq bar(%rip), %rax
+; O1-NEXT:    retq
+;
+; O2-LABEL: test_simple_atomic:
+; O2:       # %bb.0: # %entry
+; O2-NEXT:    .Lpcsection0:
+; O2-NEXT:    movq foo(%rip), %rax
+; O2-NEXT:    addq bar(%rip), %rax
+; O2-NEXT:    retq
+;
+; O3-LABEL: test_simple_atomic:
+; O3:       # %bb.0: # %entry
+; O3-NEXT:    .Lpcsection0:
+; O3-NEXT:    movq foo(%rip), %rax
+; O3-NEXT:    addq bar(%rip), %rax
+; O3-NEXT:    retq
+entry:
+  %0 = load atomic i64, ptr @foo monotonic, align 8, !pcsections !0
+  %1 = load i64, ptr @bar, align 8
+  %add = add nsw i64 %1, %0
+  ret i64 %add
+}
+
+define i64 @test_complex_atomic() {
+; O0-LABEL: test_complex_atomic:
+; O0:       # %bb.0: # %entry
+; O0-NEXT:    movl $1, %ecx
+; O0-NEXT:    .Lpcsection1:
+; O0-NEXT:    lock xaddq %rcx, foo(%rip)
+; O0-NEXT:    movq bar, %rax
+; O0-NEXT:    movq %rax, %rdx
+; O0-NEXT:    addq $1, %rdx
+; O0-NEXT:    movq %rdx, bar
+; O0-NEXT:    addq %rcx, %rax
+; O0-NEXT:    retq
+;
+; O1-LABEL: test_complex_atomic:
+; O1:       # %bb.0: # %entry
+; O1-NEXT:    movl $1, %eax
+; O1-NEXT:    .Lpcsection1:
+; O1-NEXT:    lock xaddq %rax, foo(%rip)
+; O1-NEXT:    movq bar(%rip), %rcx
+; O1-NEXT:    leaq 1(%rcx), %rdx
+; O1-NEXT:    movq %rdx, bar(%rip)
+; O1-NEXT:    addq %rcx, %rax
+; O1-NEXT:    retq
+;
+; O2-LABEL: test_complex_atomic:
+; O2:       # %bb.0: # %entry
+; O2-NEXT:    movl $1, %eax
+; O2-NEXT:    .Lpcsection1:
+; O2-NEXT:    lock xaddq %rax, foo(%rip)
+; O2-NEXT:    movq bar(%rip), %rcx
+; O2-NEXT:    leaq 1(%rcx), %rdx
+; O2-NEXT:    movq %rdx, bar(%rip)
+; O2-NEXT:    addq %rcx, %rax
+; O2-NEXT:    retq
+;
+; O3-LABEL: test_complex_atomic:
+; O3:       # %bb.0: # %entry
+; O3-NEXT:    movl $1, %eax
+; O3-NEXT:    .Lpcsection1:
+; O3-NEXT:    lock xaddq %rax, foo(%rip)
+; O3-NEXT:    movq bar(%rip), %rcx
+; O3-NEXT:    leaq 1(%rcx), %rdx
+; O3-NEXT:    movq %rdx, bar(%rip)
+; O3-NEXT:    addq %rcx, %rax
+; O3-NEXT:    retq
entry:
+  %0 = atomicrmw add ptr @foo, i64 1 monotonic, align 8, !pcsections !0
+  %1 = load i64, ptr @bar, align 8
+  %inc = add nsw i64 %1, 1
+  store i64 %inc, ptr @bar, align 8
+  %add = add nsw i64 %1, %0
+  ret i64 %add
+}
+
+define void @mixed_atomic_non_atomic() {
+; O0-LABEL: mixed_atomic_non_atomic:
+; O0:       # %bb.0: # %entry
+; O0-NEXT:    movl foo(%rip), %eax
+; O0-NEXT:    incl %eax
+; O0-NEXT:    movl %eax, foo(%rip)
+; O0-NEXT:    .Lpcsection2:
+; O0-NEXT:    movl $1, foo(%rip)
+; O0-NEXT:    movl foo, %eax
+; O0-NEXT:    addl $-1, %eax
+; O0-NEXT:    movl %eax, foo
+; O0-NEXT:    retq
+;
+; O1-LABEL: mixed_atomic_non_atomic:
+; O1:       # %bb.0: # %entry
+; O1-NEXT:    incl foo(%rip)
+; O1-NEXT:    .Lpcsection2:
+; O1-NEXT:    movl $1, foo(%rip)
+; O1-NEXT:    decl foo(%rip)
+; O1-NEXT:    retq
+;
+; O2-LABEL: mixed_atomic_non_atomic:
+; O2:       # %bb.0: # %entry
+; O2-NEXT:    incl foo(%rip)
+; O2-NEXT:    .Lpcsection2:
+; O2-NEXT:    movl $1, foo(%rip)
+; O2-NEXT:    decl foo(%rip)
+; O2-NEXT:    retq
+;
+; O3-LABEL: mixed_atomic_non_atomic:
+; O3:       # %bb.0: # %entry
+; O3-NEXT:    incl foo(%rip)
+; O3-NEXT:    .Lpcsection2:
+; O3-NEXT:    movl $1, foo(%rip)
+; O3-NEXT:    decl foo(%rip)
+; O3-NEXT:    retq
entry:
+  %0 = load volatile i32, ptr @foo, align 4
+  %inc = add nsw i32 %0, 1
+  store volatile i32 %inc, ptr @foo, align 4
+  store atomic volatile i32 1, ptr @foo monotonic, align 4, !pcsections !0
+  %1 = load volatile i32, ptr @foo, align 4
+  %dec = add nsw i32 %1, -1
+  store volatile i32 %dec, ptr @foo, align 4
+  ret void
+}
+
+!0 = !{!"somesection"}
diff --git a/llvm/test/CodeGen/X86/pcsections.ll b/llvm/test/CodeGen/X86/pcsections.ll
--- a/llvm/test/CodeGen/X86/pcsections.ll
+++ b/llvm/test/CodeGen/X86/pcsections.ll
@@ -1,3 +1,7 @@
+; This test verifies that !pcsections metadata is encoded correctly by the
+; AsmPrinter. For tests that check the metadata is propagated through the
+; backend to the generated assembly, see the pcsections-*.ll tests.
+
 ; RUN: llc -O0 < %s | FileCheck %s --check-prefixes=CHECK,DEFCM
 ; RUN: llc -O1 < %s | FileCheck %s --check-prefixes=CHECK,DEFCM
 ; RUN: llc -O2 < %s | FileCheck %s --check-prefixes=CHECK,DEFCM
@@ -73,85 +77,21 @@
   ret i64 %0
 }
 
-define i64 @test_simple_atomic() {
-; CHECK-LABEL: test_simple_atomic:
-; CHECK: .Lpcsection1:
-; CHECK-NEXT: movq
-; CHECK-NOT: .Lpcsection
-; CHECK: addq
-; CHECK-NEXT: retq
-; CHECK-NEXT: .Lfunc_end3:
-; CHECK: .section section_no_aux,"awo",@progbits,.text
-; CHECK-NEXT: .Lpcsection_base5:
-; DEFCM-NEXT: .long .Lpcsection1-.Lpcsection_base5
-; LARGE-NEXT: .quad .Lpcsection1-.Lpcsection_base5
-; CHECK-NEXT: .text
-entry:
-  %0 = load atomic i64, ptr @foo monotonic, align 8, !pcsections !0
-  %1 = load i64, ptr @bar, align 8
-  %add = add nsw i64 %1, %0
-  ret i64 %add
-}
-
-define i64 @test_complex_atomic() {
-; CHECK-LABEL: test_complex_atomic:
-; CHECK: movl $1
-; CHECK-NEXT: .Lpcsection2:
-; CHECK-NEXT: lock xaddq
-; CHECK-NOT: .Lpcsection
-; CHECK: movq
-; CHECK: addq
-; CHECK: retq
-; CHECK-NEXT: .Lfunc_end4:
-; CHECK: .section section_no_aux,"awo",@progbits,.text
-; CHECK-NEXT: .Lpcsection_base6:
-; DEFCM-NEXT: .long .Lpcsection2-.Lpcsection_base6
-; LARGE-NEXT: .quad .Lpcsection2-.Lpcsection_base6
-; CHECK-NEXT: .text
-entry:
-  %0 = atomicrmw add ptr @foo, i64 1 monotonic, align 8, !pcsections !0
-  %1 = load i64, ptr @bar, align 8
-  %inc = add nsw i64 %1, 1
-  store i64 %inc, ptr @bar, align 8
-  %add = add nsw i64 %1, %0
-  ret i64 %add
-}
-
-define void @mixed_atomic_non_atomic() {
-; CHECK-LABEL: mixed_atomic_non_atomic:
-; CHECK: .Lpcsection
-; CHECK-NEXT: movl $1
-; CHECK: .section section_no_aux,"awo",@progbits,.text
-; CHECK-NEXT: .Lpcsection_base7:
-; DEFCM-NEXT: .long .Lpcsection3-.Lpcsection_base7
-; LARGE-NEXT: .quad .Lpcsection3-.Lpcsection_base7
-; CHECK-NEXT: .text
-entry:
-  %0 = load volatile i32, ptr @foo, align 4
-  %inc = add nsw i32 %0, 1
-  store volatile i32 %inc, ptr @foo, align 4
-  store atomic volatile i32 1, ptr @foo monotonic, align 4, !pcsections !0
-  %1 = load volatile i32, ptr @foo, align 4
-  %dec = add nsw i32 %1, -1
-  store volatile i32 %dec, ptr @foo, align 4
-  ret void
-}
-
 define void @multiple_uleb128() !pcsections !6 {
 ; CHECK-LABEL: multiple_uleb128:
 ; CHECK: .section section_aux,"awo",@progbits,.text
-; CHECK-NEXT: .Lpcsection_base8:
-; DEFCM-NEXT: .long .Lfunc_begin3-.Lpcsection_base8
-; LARGE-NEXT: .quad .Lfunc_begin3-.Lpcsection_base8
-; CHECK-NEXT: .uleb128 .Lfunc_end6-.Lfunc_begin3
+; CHECK-NEXT: .Lpcsection_base5:
+; DEFCM-NEXT: .long .Lfunc_begin3-.Lpcsection_base5
+; LARGE-NEXT: .quad .Lfunc_begin3-.Lpcsection_base5
+; CHECK-NEXT: .uleb128 .Lfunc_end3-.Lfunc_begin3
 ; CHECK-NEXT: .byte 42
 ; CHECK-NEXT: .ascii "\345\216&"
 ; CHECK-NEXT: .byte 255
 ; CHECK-NEXT: .section section_aux_21264,"awo",@progbits,.text
-; CHECK-NEXT: .Lpcsection_base9:
-; DEFCM-NEXT: .long .Lfunc_begin3-.Lpcsection_base9
-; LARGE-NEXT: .quad .Lfunc_begin3-.Lpcsection_base9
-; CHECK-NEXT: .long .Lfunc_end6-.Lfunc_begin3
+; CHECK-NEXT: .Lpcsection_base6:
+; DEFCM-NEXT: .long .Lfunc_begin3-.Lpcsection_base6
+; LARGE-NEXT: .quad .Lfunc_begin3-.Lpcsection_base6
+; CHECK-NEXT: .long .Lfunc_end3-.Lfunc_begin3
 ; CHECK-NEXT: .long 21264
 ; CHECK-NEXT: .text
 entry: