Index: test/CodeGen/X86/atomic_mi.ll
===================================================================
--- test/CodeGen/X86/atomic_mi.ll
+++ test/CodeGen/X86/atomic_mi.ll
@@ -18,6 +18,9 @@
 ; seq_cst stores are left as (lock) xchgl, but we try to check every other
 ; attribute at least once.
 
+; Please note that these operations do not require the lock prefix: only
+; sequentially consistent stores require this kind of protection on X86.
+
 define void @store_atomic_imm_8(i8* %p) {
 ; X64-LABEL: store_atomic_imm_8
 ; X64: movb
@@ -90,9 +93,11 @@
 
 define void @add_8(i8* %p) {
 ; X64-LABEL: add_8
+; X64-NOT: lock
 ; X64: addb
 ; X64-NOT: movb
 ; X32-LABEL: add_8
+; X32-NOT: lock
 ; X32: addb
 ; X32-NOT: movb
   %1 = load atomic i8* %p seq_cst, align 1
@@ -116,9 +121,11 @@
 
 define void @add_32(i32* %p) {
 ; X64-LABEL: add_32
+; X64-NOT: lock
 ; X64: addl
 ; X64-NOT: movl
 ; X32-LABEL: add_32
+; X32-NOT: lock
 ; X32: addl
 ; X32-NOT: movl
   %1 = load atomic i32* %p acquire, align 4
@@ -129,6 +136,7 @@
 
 define void @add_64(i64* %p) {
 ; X64-LABEL: add_64
+; X64-NOT: lock
 ; X64: addq
 ; X64-NOT: movq
 ; We do not check X86-32 as it cannot do 'addq'.
@@ -154,9 +162,11 @@
 
 define void @and_8(i8* %p) {
 ; X64-LABEL: and_8
+; X64-NOT: lock
 ; X64: andb
 ; X64-NOT: movb
 ; X32-LABEL: and_8
+; X32-NOT: lock
 ; X32: andb
 ; X32-NOT: movb
   %1 = load atomic i8* %p monotonic, align 1
@@ -180,9 +190,11 @@
 
 define void @and_32(i32* %p) {
 ; X64-LABEL: and_32
+; X64-NOT: lock
 ; X64: andl
 ; X64-NOT: movl
 ; X32-LABEL: and_32
+; X32-NOT: lock
 ; X32: andl
 ; X32-NOT: movl
   %1 = load atomic i32* %p acquire, align 4
@@ -193,6 +205,7 @@
 
 define void @and_64(i64* %p) {
 ; X64-LABEL: and_64
+; X64-NOT: lock
 ; X64: andq
 ; X64-NOT: movq
 ; We do not check X86-32 as it cannot do 'andq'.
@@ -203,13 +216,26 @@
   ret void
 }
 
+define void @and_32_seq_cst(i32* %p) {
+; X64-LABEL: and_32_seq_cst
+; X64: xchgl
+; X32-LABEL: and_32_seq_cst
+; X32: xchgl
+  %1 = load atomic i32* %p monotonic, align 4
+  %2 = and i32 %1, 2
+  store atomic i32 %2, i32* %p seq_cst, align 4
+  ret void
+}
+
 ; ----- OR -----
 
 define void @or_8(i8* %p) {
 ; X64-LABEL: or_8
+; X64-NOT: lock
 ; X64: orb
 ; X64-NOT: movb
 ; X32-LABEL: or_8
+; X32-NOT: lock
 ; X32: orb
 ; X32-NOT: movb
   %1 = load atomic i8* %p acquire, align 1
@@ -231,9 +257,11 @@
 
 define void @or_32(i32* %p) {
 ; X64-LABEL: or_32
+; X64-NOT: lock
 ; X64: orl
 ; X64-NOT: movl
 ; X32-LABEL: or_32
+; X32-NOT: lock
 ; X32: orl
 ; X32-NOT: movl
   %1 = load atomic i32* %p acquire, align 4
@@ -244,6 +272,7 @@
 
 define void @or_64(i64* %p) {
 ; X64-LABEL: or_64
+; X64-NOT: lock
 ; X64: orq
 ; X64-NOT: movq
 ; We do not check X86-32 as it cannot do 'orq'.
@@ -254,13 +283,26 @@
   ret void
 }
 
+define void @or_32_seq_cst(i32* %p) {
+; X64-LABEL: or_32_seq_cst
+; X64: xchgl
+; X32-LABEL: or_32_seq_cst
+; X32: xchgl
+  %1 = load atomic i32* %p monotonic, align 4
+  %2 = or i32 %1, 2
+  store atomic i32 %2, i32* %p seq_cst, align 4
+  ret void
+}
+
 ; ----- XOR -----
 
 define void @xor_8(i8* %p) {
 ; X64-LABEL: xor_8
+; X64-NOT: lock
 ; X64: xorb
 ; X64-NOT: movb
 ; X32-LABEL: xor_8
+; X32-NOT: lock
 ; X32: xorb
 ; X32-NOT: movb
   %1 = load atomic i8* %p acquire, align 1
@@ -282,9 +324,11 @@
 
 define void @xor_32(i32* %p) {
 ; X64-LABEL: xor_32
+; X64-NOT: lock
 ; X64: xorl
 ; X64-NOT: movl
 ; X32-LABEL: xor_32
+; X32-NOT: lock
 ; X32: xorl
 ; X32-NOT: movl
   %1 = load atomic i32* %p acquire, align 4
@@ -295,6 +339,7 @@
 
 define void @xor_64(i64* %p) {
 ; X64-LABEL: xor_64
+; X64-NOT: lock
 ; X64: xorq
 ; X64-NOT: movq
 ; We do not check X86-32 as it cannot do 'xorq'.
@@ -305,13 +350,26 @@
   ret void
 }
 
+define void @xor_32_seq_cst(i32* %p) {
+; X64-LABEL: xor_32_seq_cst
+; X64: xchgl
+; X32-LABEL: xor_32_seq_cst
+; X32: xchgl
+  %1 = load atomic i32* %p monotonic, align 4
+  %2 = xor i32 %1, 2
+  store atomic i32 %2, i32* %p seq_cst, align 4
+  ret void
+}
+
 ; ----- INC -----
 
 define void @inc_8(i8* %p) {
 ; X64-LABEL: inc_8
+; X64-NOT: lock
 ; X64: incb
 ; X64-NOT: movb
 ; X32-LABEL: inc_8
+; X32-NOT: lock
 ; X32: incb
 ; X32-NOT: movb
   %1 = load atomic i8* %p seq_cst, align 1
@@ -335,9 +393,11 @@
 
 define void @inc_32(i32* %p) {
 ; X64-LABEL: inc_32
+; X64-NOT: lock
 ; X64: incl
 ; X64-NOT: movl
 ; X32-LABEL: inc_32
+; X32-NOT: lock
 ; X32: incl
 ; X32-NOT: movl
   %1 = load atomic i32* %p acquire, align 4
@@ -348,6 +408,7 @@
 
 define void @inc_64(i64* %p) {
 ; X64-LABEL: inc_64
+; X64-NOT: lock
 ; X64: incq
 ; X64-NOT: movq
 ; We do not check X86-32 as it cannot do 'incq'.
@@ -373,9 +434,11 @@
 
 define void @dec_8(i8* %p) {
 ; X64-LABEL: dec_8
+; X64-NOT: lock
 ; X64: decb
 ; X64-NOT: movb
 ; X32-LABEL: dec_8
+; X32-NOT: lock
 ; X32: decb
 ; X32-NOT: movb
   %1 = load atomic i8* %p seq_cst, align 1
@@ -399,9 +462,11 @@
 
 define void @dec_32(i32* %p) {
 ; X64-LABEL: dec_32
+; X64-NOT: lock
 ; X64: decl
 ; X64-NOT: movl
 ; X32-LABEL: dec_32
+; X32-NOT: lock
 ; X32: decl
 ; X32-NOT: movl
   %1 = load atomic i32* %p acquire, align 4
@@ -412,6 +477,7 @@
 
 define void @dec_64(i64* %p) {
 ; X64-LABEL: dec_64
+; X64-NOT: lock
 ; X64: decq
 ; X64-NOT: movq
 ; We do not check X86-32 as it cannot do 'decq'.
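
Note (not part of the patch): the distinction the new comment describes can be illustrated with a minimal IR sketch in the same style as this test. On X86, an atomic store with release or weaker ordering is expected to lower to a plain mov with no lock prefix, while a seq_cst store is lowered to xchg, which is implicitly locked; the function names below are hypothetical and only serve as an illustration.

; Hypothetical sketch, not part of atomic_mi.ll: contrast a release store
; (plain movl, no lock prefix) with a seq_cst store (xchgl, implicitly
; locked on x86).
define void @store_release_32_example(i32* %p) {
  store atomic i32 42, i32* %p release, align 4   ; expected x86-64: movl $42, (%rdi)
  ret void
}

define void @store_seq_cst_32_example(i32* %p) {
  store atomic i32 42, i32* %p seq_cst, align 4   ; expected x86-64: xchgl (reg), (%rdi)
  ret void
}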