Index: llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp =================================================================== --- llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp +++ llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp @@ -613,10 +613,10 @@ setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal); } - setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand); - setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand); - setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand); + if (!isPPC64) { + setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand); + setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand); + } setBooleanContents(ZeroOrOneBooleanContent); // Altivec instructions set fields to all zeros or all ones. Index: llvm/trunk/lib/Target/PowerPC/PPCInstr64Bit.td =================================================================== --- llvm/trunk/lib/Target/PowerPC/PPCInstr64Bit.td +++ llvm/trunk/lib/Target/PowerPC/PPCInstr64Bit.td @@ -1135,3 +1135,9 @@ def : Pat<(unaligned4store i64:$rS, xoaddr:$dst), (STDX $rS, xoaddr:$dst)>; +// 64-bit atomic loads and stores +def : Pat<(atomic_load_64 ixaddr:$src), (LD memrix:$src)>; +def : Pat<(atomic_load_64 xaddr:$src), (LDX memrr:$src)>; + +def : Pat<(atomic_store_64 ixaddr:$ptr, i64:$val), (STD g8rc:$val, memrix:$ptr)>; +def : Pat<(atomic_store_64 xaddr:$ptr, i64:$val), (STDX g8rc:$val, memrr:$ptr)>; Index: llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.td =================================================================== --- llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.td +++ llvm/trunk/lib/Target/PowerPC/PPCInstrInfo.td @@ -3695,3 +3695,19 @@ defm : TrapExtendedMnemonic<"lnl", 5>; defm : TrapExtendedMnemonic<"lng", 6>; defm : TrapExtendedMnemonic<"u", 31>; + +// Atomic loads +def : Pat<(atomic_load_8 iaddr:$src), (LBZ memri:$src)>; +def : Pat<(atomic_load_16 iaddr:$src), (LHZ memri:$src)>; +def : Pat<(atomic_load_32 iaddr:$src), (LWZ memri:$src)>; +def 
: Pat<(atomic_load_8 xaddr:$src), (LBZX memrr:$src)>; +def : Pat<(atomic_load_16 xaddr:$src), (LHZX memrr:$src)>; +def : Pat<(atomic_load_32 xaddr:$src), (LWZX memrr:$src)>; + +// Atomic stores +def : Pat<(atomic_store_8 iaddr:$ptr, i32:$val), (STB gprc:$val, memri:$ptr)>; +def : Pat<(atomic_store_16 iaddr:$ptr, i32:$val), (STH gprc:$val, memri:$ptr)>; +def : Pat<(atomic_store_32 iaddr:$ptr, i32:$val), (STW gprc:$val, memri:$ptr)>; +def : Pat<(atomic_store_8 xaddr:$ptr, i32:$val), (STBX gprc:$val, memrr:$ptr)>; +def : Pat<(atomic_store_16 xaddr:$ptr, i32:$val), (STHX gprc:$val, memrr:$ptr)>; +def : Pat<(atomic_store_32 xaddr:$ptr, i32:$val), (STWX gprc:$val, memrr:$ptr)>; Index: llvm/trunk/test/CodeGen/PowerPC/atomic-2.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/atomic-2.ll +++ llvm/trunk/test/CodeGen/PowerPC/atomic-2.ll @@ -30,8 +30,9 @@ entry: ; CHECK: @atomic_store store atomic i64 %val, i64* %mem release, align 64 -; CHECK: ldarx -; CHECK: stdcx. +; CHECK: sync 1 +; CHECK-NOT: stdcx +; CHECK: std ret void } @@ -39,9 +40,9 @@ entry: ; CHECK: @atomic_load %tmp = load atomic i64* %mem acquire, align 64 -; CHECK: ldarx -; CHECK: stdcx. -; CHECK: stdcx. +; CHECK-NOT: ldarx +; CHECK: ld +; CHECK: sync 1 ret i64 %tmp } Index: llvm/trunk/test/CodeGen/PowerPC/atomics-indexed.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/atomics-indexed.ll +++ llvm/trunk/test/CodeGen/PowerPC/atomics-indexed.ll @@ -0,0 +1,81 @@ +; RUN: llc < %s -mtriple=powerpc-apple-darwin -march=ppc32 -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=PPC32 +; FIXME: -verify-machineinstrs currently fails on ppc64 (mismatched register/instruction). 
+; This is already checked for in Atomics-64.ll +; RUN: llc < %s -mtriple=powerpc-apple-darwin -march=ppc64 | FileCheck %s --check-prefix=CHECK --check-prefix=PPC64 + +; In this file, we check that atomic load/store can make use of the indexed +; versions of the instructions. + +; Indexed version of loads +define i8 @load_x_i8_seq_cst([100000 x i8]* %mem) { +; CHECK-LABEL: load_x_i8_seq_cst +; CHECK: sync 0 +; CHECK: lbzx +; CHECK: sync 1 + %ptr = getelementptr inbounds [100000 x i8]* %mem, i64 0, i64 90000 + %val = load atomic i8* %ptr seq_cst, align 1 + ret i8 %val +} +define i16 @load_x_i16_acquire([100000 x i16]* %mem) { +; CHECK-LABEL: load_x_i16_acquire +; CHECK: lhzx +; CHECK: sync 1 + %ptr = getelementptr inbounds [100000 x i16]* %mem, i64 0, i64 90000 + %val = load atomic i16* %ptr acquire, align 2 + ret i16 %val +} +define i32 @load_x_i32_monotonic([100000 x i32]* %mem) { +; CHECK-LABEL: load_x_i32_monotonic +; CHECK: lwzx +; CHECK-NOT: sync + %ptr = getelementptr inbounds [100000 x i32]* %mem, i64 0, i64 90000 + %val = load atomic i32* %ptr monotonic, align 4 + ret i32 %val +} +define i64 @load_x_i64_unordered([100000 x i64]* %mem) { +; CHECK-LABEL: load_x_i64_unordered +; PPC32: __sync_ +; PPC64-NOT: __sync_ +; PPC64: ldx +; CHECK-NOT: sync + %ptr = getelementptr inbounds [100000 x i64]* %mem, i64 0, i64 90000 + %val = load atomic i64* %ptr unordered, align 8 + ret i64 %val +} + +; Indexed version of stores +define void @store_x_i8_seq_cst([100000 x i8]* %mem) { +; CHECK-LABEL: store_x_i8_seq_cst +; CHECK: sync 0 +; CHECK: stbx + %ptr = getelementptr inbounds [100000 x i8]* %mem, i64 0, i64 90000 + store atomic i8 42, i8* %ptr seq_cst, align 1 + ret void +} +define void @store_x_i16_release([100000 x i16]* %mem) { +; CHECK-LABEL: store_x_i16_release +; CHECK: sync 1 +; CHECK: sthx + %ptr = getelementptr inbounds [100000 x i16]* %mem, i64 0, i64 90000 + store atomic i16 42, i16* %ptr release, align 2 + ret void +} +define void 
@store_x_i32_monotonic([100000 x i32]* %mem) { +; CHECK-LABEL: store_x_i32_monotonic +; CHECK-NOT: sync +; CHECK: stwx + %ptr = getelementptr inbounds [100000 x i32]* %mem, i64 0, i64 90000 + store atomic i32 42, i32* %ptr monotonic, align 4 + ret void +} +define void @store_x_i64_unordered([100000 x i64]* %mem) { +; CHECK-LABEL: store_x_i64_unordered +; CHECK-NOT: sync 0 +; CHECK-NOT: sync 1 +; PPC32: __sync_ +; PPC64-NOT: __sync_ +; PPC64: stdx + %ptr = getelementptr inbounds [100000 x i64]* %mem, i64 0, i64 90000 + store atomic i64 42, i64* %ptr unordered, align 8 + ret void +} Index: llvm/trunk/test/CodeGen/PowerPC/atomics.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/atomics.ll +++ llvm/trunk/test/CodeGen/PowerPC/atomics.ll @@ -11,18 +11,21 @@ ; We also vary orderings to check for barriers. define i8 @load_i8_unordered(i8* %mem) { ; CHECK-LABEL: load_i8_unordered +; CHECK: lbz ; CHECK-NOT: sync %val = load atomic i8* %mem unordered, align 1 ret i8 %val } define i16 @load_i16_monotonic(i16* %mem) { ; CHECK-LABEL: load_i16_monotonic +; CHECK: lhz ; CHECK-NOT: sync %val = load atomic i16* %mem monotonic, align 2 ret i16 %val } define i32 @load_i32_acquire(i32* %mem) { ; CHECK-LABEL: load_i32_acquire +; CHECK: lwz %val = load atomic i32* %mem acquire, align 4 ; CHECK: sync 1 ret i32 %val @@ -30,6 +33,9 @@ define i64 @load_i64_seq_cst(i64* %mem) { ; CHECK-LABEL: load_i64_seq_cst ; CHECK: sync 0 +; PPC32: __sync_ +; PPC64-NOT: __sync_ +; PPC64: ld %val = load atomic i64* %mem seq_cst, align 8 ; CHECK: sync 1 ret i64 %val @@ -39,24 +45,30 @@ define void @store_i8_unordered(i8* %mem) { ; CHECK-LABEL: store_i8_unordered ; CHECK-NOT: sync +; CHECK: stb store atomic i8 42, i8* %mem unordered, align 1 ret void } define void @store_i16_monotonic(i16* %mem) { ; CHECK-LABEL: store_i16_monotonic ; CHECK-NOT: sync +; CHECK: sth store atomic i16 42, i16* %mem monotonic, align 2 ret void } define void 
@store_i32_release(i32* %mem) { ; CHECK-LABEL: store_i32_release ; CHECK: sync 1 +; CHECK: stw store atomic i32 42, i32* %mem release, align 4 ret void } define void @store_i64_seq_cst(i64* %mem) { ; CHECK-LABEL: store_i64_seq_cst ; CHECK: sync 0 +; PPC32: __sync_ +; PPC64-NOT: __sync_ +; PPC64: std store atomic i64 42, i64* %mem seq_cst, align 8 ret void } Index: llvm/trunk/test/CodeGen/PowerPC/pr15630.ll =================================================================== --- llvm/trunk/test/CodeGen/PowerPC/pr15630.ll +++ llvm/trunk/test/CodeGen/PowerPC/pr15630.ll @@ -13,4 +13,5 @@ ret void } -; CHECK: stwcx. +; CHECK: sync +; CHECK: stb