Index: lib/Target/Mips/Mips64InstrInfo.td
===================================================================
--- lib/Target/Mips/Mips64InstrInfo.td
+++ lib/Target/Mips/Mips64InstrInfo.td
@@ -546,6 +546,11 @@
                  (BBIT132 i64:$lhs, (Log2HI PowerOf2HI:$mask), bb:$dst)>;
 }
 
+defm : AtomicLoadStorePats<atomic_load_8, atomic_store_8, LB64, SB64, GPR64>;
+defm : AtomicLoadStorePats<atomic_load_16, atomic_store_16, LH64, SH64, GPR64>;
+defm : AtomicLoadStorePats<atomic_load_32, atomic_store_32, LW64, SW64, GPR64>;
+defm : AtomicLoadStorePats<atomic_load_64, atomic_store_64, LD, SD, GPR64>;
+
 //===----------------------------------------------------------------------===//
 // Instruction aliases
 //===----------------------------------------------------------------------===//
Index: lib/Target/Mips/MipsISelLowering.cpp
===================================================================
--- lib/Target/Mips/MipsISelLowering.cpp
+++ lib/Target/Mips/MipsISelLowering.cpp
@@ -391,10 +391,10 @@
   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
 
-  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Expand);
-  setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
-  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand);
-  setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
+  if (!Subtarget.isGP64bit()) {
+    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
+    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
+  }
 
   setInsertFencesForAtomic(true);
 
Index: lib/Target/Mips/MipsInstrInfo.td
===================================================================
--- lib/Target/Mips/MipsInstrInfo.td
+++ lib/Target/Mips/MipsInstrInfo.td
@@ -2083,6 +2083,18 @@
 }
 }
 
+// Atomic load/store patterns.
+multiclass AtomicLoadStorePats<PatFrag LdNode, PatFrag StNode,
+                               Instruction LdInst, Instruction StInst,
+                               RegisterClass RC> {
+  def : MipsPat<(LdNode addr:$a), (LdInst addr:$a)>;
+  def : MipsPat<(StNode addr:$a, RC:$v), (StInst RC:$v, addr:$a)>;
+}
+
+defm : AtomicLoadStorePats<atomic_load_8, atomic_store_8, LB, SB, GPR32>;
+defm : AtomicLoadStorePats<atomic_load_16, atomic_store_16, LH, SH, GPR32>;
+defm : AtomicLoadStorePats<atomic_load_32, atomic_store_32, LW, SW, GPR32>;
+
 //===----------------------------------------------------------------------===//
 // Floating Point Support
 //===----------------------------------------------------------------------===//
Index: test/CodeGen/Mips/atomic-load-store.ll
===================================================================
--- /dev/null
+++ test/CodeGen/Mips/atomic-load-store.ll
@@ -0,0 +1,78 @@
+; RUN: llc -march=mips -mcpu=mips32r2 < %s | FileCheck %s -check-prefix=ALL
+; RUN: llc -march=mips -mcpu=mips32r6 < %s | FileCheck %s -check-prefix=ALL
+; RUN: llc -march=mips64 -mcpu=mips64r2 < %s | \
+; RUN:   FileCheck %s -check-prefix=ALL -check-prefix=M64
+; RUN: llc -march=mips64 -mcpu=mips64r6 < %s | \
+; RUN:   FileCheck %s -check-prefix=ALL -check-prefix=M64
+
+define i8 @load_i8(i8* %ptr) {
+; ALL-LABEL: load_i8
+
+; ALL: lb $2, 0($4)
+; ALL: sync
+  %val = load atomic i8, i8* %ptr acquire, align 1
+  ret i8 %val
+}
+
+define i16 @load_i16(i16* %ptr) {
+; ALL-LABEL: load_i16
+
+; ALL: lh $2, 0($4)
+; ALL: sync
+  %val = load atomic i16, i16* %ptr acquire, align 2
+  ret i16 %val
+}
+
+define i32 @load_i32(i32* %ptr) {
+; ALL-LABEL: load_i32
+
+; ALL: lw $2, 0($4)
+; ALL: sync
+  %val = load atomic i32, i32* %ptr acquire, align 4
+  ret i32 %val
+}
+
+define i64 @load_i64(i64* %ptr) {
+; M64-LABEL: load_i64
+
+; M64: ld $2, 0($4)
+; M64: sync
+  %val = load atomic i64, i64* %ptr acquire, align 8
+  ret i64 %val
+}
+
+define void @store_i8(i8* %ptr, i8 signext %v) {
+; ALL-LABEL: store_i8
+
+; ALL: sync
+; ALL: sb $5, 0($4)
+  store atomic i8 %v, i8* %ptr release, align 1
+  ret void
+}
+
+define void @store_i16(i16* %ptr, i16 signext %v) {
+; ALL-LABEL: store_i16
+
+; ALL: sync
+; ALL: sh $5, 0($4)
+  store atomic i16 %v, i16* %ptr release, align 2
+  ret void
+}
+
+define void @store_i32(i32* %ptr, i32 signext %v) {
+; ALL-LABEL: store_i32
+
+; ALL: sync
+; ALL: sw $5, 0($4)
+  store atomic i32 %v, i32* %ptr release, align 4
+  ret void
+}
+
+define void @store_i64(i64* %ptr, i64 %v) {
+; M64-LABEL: store_i64
+
+; M64: sync
+; M64: sd $5, 0($4)
+  store atomic i64 %v, i64* %ptr release, align 8
+  ret void
+}
Index: test/CodeGen/Mips/atomicSCr6.ll
===================================================================
--- test/CodeGen/Mips/atomicSCr6.ll
+++ test/CodeGen/Mips/atomicSCr6.ll
@@ -1,32 +1,26 @@
-; RUN: llc -asm-show-inst -march=mips64el -mcpu=mips64r6 < %s -filetype=asm -o - | FileCheck %s -check-prefix=CHK64
-; RUN: llc -asm-show-inst -march=mipsel -mcpu=mips32r6 < %s -filetype=asm -o -| FileCheck %s -check-prefix=CHK32
+; RUN: llc -asm-show-inst -march=mipsel -mcpu=mips32r6 < %s | \
+; RUN:   FileCheck %s -check-prefix=CHK32
+; RUN: llc -asm-show-inst -march=mips64el -mcpu=mips64r6 < %s | \
+; RUN:   FileCheck %s -check-prefix=CHK64
 
-define internal i32 @atomic_load_test1() #0 {
-entry:
+@a = common global i32 0, align 4
+@b = common global i64 0, align 8
 
-  %load_add = alloca i32*, align 8
-  %.atomicdst = alloca i32, align 4
-  %0 = load i32*, i32** %load_add, align 8
-  %1 = load atomic i32, i32* %0 acquire, align 4
-  store i32 %1, i32* %.atomicdst, align 4
-  %2 = load i32, i32* %.atomicdst, align 4
-
-  ret i32 %2
-}
-define internal i64 @atomic_load_test2() #0 {
-entry:
+define i32 @ll_sc(i32 signext %x) {
+; CHK32-LABEL: ll_sc
 
-  %load_add = alloca i64*, align 16
-  %.atomicdst = alloca i64, align 8
-  %0 = load i64*, i64** %load_add, align 16
-  %1 = load atomic i64, i64* %0 acquire, align 8
-  store i64 %1, i64* %.atomicdst, align 8
-  %2 = load i64, i64* %.atomicdst, align 8
-
-  ret i64 %2
-}
 ;CHK32: LL_R6
 ;CHK32: SC_R6
+  %1 = atomicrmw add i32* @a, i32 %x monotonic
+  ret i32 %1
+}
+
+define i64 @lld_scd(i64 signext %x) {
+; CHK64-LABEL: lld_scd
+
 ;CHK64: LLD_R6
 ;CHK64: SCD_R6
+  %1 = atomicrmw add i64* @b, i64 %x monotonic
+  ret i64 %1
+}