diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -1108,10 +1108,20 @@
 def atomic_store_unordered_monotonic_64
     : unordered_monotonic_store<atomic_store_64>;
 
-defm : StPat<atomic_store_8, ST_B, GPR, GRLenVT>;
-defm : StPat<atomic_store_16, ST_H, GPR, GRLenVT>;
-defm : StPat<atomic_store_32, ST_W, GPR, GRLenVT>,
-       Requires<[IsLA32]>;
+/// AtomicStores
+
+multiclass AtomicStPat<PatFrag StoreOp, LAInst Inst, RegisterClass StTy,
+                       ValueType vt> {
+  def : Pat<(StoreOp BaseAddr:$ptr, (vt StTy:$val)),
+            (Inst StTy:$val, BaseAddr:$ptr, 0)>;
+  def : Pat<(StoreOp (AddLike BaseAddr:$ptr, simm12:$imm12), (vt StTy:$val)),
+            (Inst StTy:$val, BaseAddr:$ptr, simm12:$imm12)>;
+}
+
+defm : AtomicStPat<atomic_store_8, ST_B, GPR, GRLenVT>;
+defm : AtomicStPat<atomic_store_16, ST_H, GPR, GRLenVT>;
+defm : AtomicStPat<atomic_store_32, ST_W, GPR, GRLenVT>,
+       Requires<[IsLA32]>;
 
 def PseudoAtomicStoreW : Pseudo<(outs GPR:$dst), (ins GPR:$rj, GPR:$rk)>,
                          PseudoInstExpansion<(AMSWAP_DB_W R0,
@@ -1129,8 +1139,8 @@
           (PseudoAtomicStoreD GPR:$rj, GPR:$rk)>;
 
 defm : LdPat<atomic_load_64, LD_D>;
-defm : StPat<atomic_store_unordered_monotonic_32, ST_W, GPR, i64>;
-defm : StPat<atomic_store_unordered_monotonic_64, ST_D, GPR, i64>;
+defm : AtomicStPat<atomic_store_unordered_monotonic_32, ST_W, GPR, i64>;
+defm : AtomicStPat<atomic_store_unordered_monotonic_64, ST_D, GPR, i64>;
 } // Predicates = [IsLA64]
 
 /// Atomic Ops
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
@@ -76,13 +76,13 @@
 ; LA32-LABEL: store_release_i8:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    dbar 0
-; LA32-NEXT:    st.b $a0, $a1, 0
+; LA32-NEXT:    st.b $a1, $a0, 0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: store_release_i8:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    dbar 0
-; LA64-NEXT:    st.b $a0, $a1, 0
+; LA64-NEXT:    st.b $a1, $a0, 0
 ; LA64-NEXT:    ret
   store atomic i8 %v, ptr %ptr release, align 1
   ret void
@@ -92,13 +92,13 @@
 ; LA32-LABEL: store_release_i16:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    dbar 0
-; LA32-NEXT:    st.h $a0, $a1, 0
+; LA32-NEXT:    st.h $a1, $a0, 0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: store_release_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    dbar 0
-; LA64-NEXT:    st.h $a0, $a1, 0
+; LA64-NEXT:    st.h $a1, $a0, 0
 ; LA64-NEXT:    ret
   store atomic i16 %v, ptr %ptr release, align 2
   ret void
@@ -108,7 +108,7 @@
 ; LA32-LABEL: store_release_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    dbar 0
-; LA32-NEXT:    st.w $a0, $a1, 0
+; LA32-NEXT:    st.w $a1, $a0, 0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: store_release_i32:
@@ -143,12 +143,12 @@
 define void @store_unordered_i8(ptr %ptr, i8 signext %v) {
 ; LA32-LABEL: store_unordered_i8:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    st.b $a0, $a1, 0
+; LA32-NEXT:    st.b $a1, $a0, 0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: store_unordered_i8:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    st.b $a0, $a1, 0
+; LA64-NEXT:    st.b $a1, $a0, 0
 ; LA64-NEXT:    ret
   store atomic i8 %v, ptr %ptr unordered, align 1
   ret void
@@ -157,12 +157,12 @@
 define void @store_unordered_i16(ptr %ptr, i16 signext %v) {
 ; LA32-LABEL: store_unordered_i16:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    st.h $a0, $a1, 0
+; LA32-NEXT:    st.h $a1, $a0, 0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: store_unordered_i16:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    st.h $a0, $a1, 0
+; LA64-NEXT:    st.h $a1, $a0, 0
 ; LA64-NEXT:    ret
   store atomic i16 %v, ptr %ptr unordered, align 2
   ret void
@@ -171,12 +171,12 @@
 define void @store_unordered_i32(ptr %ptr, i32 signext %v) {
 ; LA32-LABEL: store_unordered_i32:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    st.w $a0, $a1, 0
+; LA32-NEXT:    st.w $a1, $a0, 0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: store_unordered_i32:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    st.w $a0, $a1, 0
+; LA64-NEXT:    st.w $a1, $a0, 0
 ; LA64-NEXT:    ret
   store atomic i32 %v, ptr %ptr unordered, align 4
   ret void
@@ -197,7 +197,7 @@
 ;
 ; LA64-LABEL: store_unordered_i64:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    st.d $a0, $a1, 0
+; LA64-NEXT:    st.d $a1, $a0, 0
 ; LA64-NEXT:    ret
   store atomic i64 %v, ptr %ptr unordered, align 8
   ret void
@@ -206,12 +206,12 @@
 define void @store_monotonic_i8(ptr %ptr, i8 signext %v) {
 ; LA32-LABEL: store_monotonic_i8:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    st.b $a0, $a1, 0
+; LA32-NEXT:    st.b $a1, $a0, 0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: store_monotonic_i8:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    st.b $a0, $a1, 0
+; LA64-NEXT:    st.b $a1, $a0, 0
 ; LA64-NEXT:    ret
   store atomic i8 %v, ptr %ptr monotonic, align 1
   ret void
@@ -220,12 +220,12 @@
 define void @store_monotonic_i16(ptr %ptr, i16 signext %v) {
 ; LA32-LABEL: store_monotonic_i16:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    st.h $a0, $a1, 0
+; LA32-NEXT:    st.h $a1, $a0, 0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: store_monotonic_i16:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    st.h $a0, $a1, 0
+; LA64-NEXT:    st.h $a1, $a0, 0
 ; LA64-NEXT:    ret
   store atomic i16 %v, ptr %ptr monotonic, align 2
   ret void
@@ -234,12 +234,12 @@
 define void @store_monotonic_i32(ptr %ptr, i32 signext %v) {
 ; LA32-LABEL: store_monotonic_i32:
 ; LA32:       # %bb.0:
-; LA32-NEXT:    st.w $a0, $a1, 0
+; LA32-NEXT:    st.w $a1, $a0, 0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: store_monotonic_i32:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    st.w $a0, $a1, 0
+; LA64-NEXT:    st.w $a1, $a0, 0
 ; LA64-NEXT:    ret
   store atomic i32 %v, ptr %ptr monotonic, align 4
   ret void
@@ -260,7 +260,7 @@
 ;
 ; LA64-LABEL: store_monotonic_i64:
 ; LA64:       # %bb.0:
-; LA64-NEXT:    st.d $a0, $a1, 0
+; LA64-NEXT:    st.d $a1, $a0, 0
 ; LA64-NEXT:    ret
   store atomic i64 %v, ptr %ptr monotonic, align 8
   ret void
@@ -270,14 +270,14 @@
 ; LA32-LABEL: store_seq_cst_i8:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    dbar 0
-; LA32-NEXT:    st.b $a0, $a1, 0
+; LA32-NEXT:    st.b $a1, $a0, 0
 ; LA32-NEXT:    dbar 0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: store_seq_cst_i8:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    dbar 0
-; LA64-NEXT:    st.b $a0, $a1, 0
+; LA64-NEXT:    st.b $a1, $a0, 0
 ; LA64-NEXT:    dbar 0
 ; LA64-NEXT:    ret
   store atomic i8 %v, ptr %ptr seq_cst, align 1
@@ -288,14 +288,14 @@
 ; LA32-LABEL: store_seq_cst_i16:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    dbar 0
-; LA32-NEXT:    st.h $a0, $a1, 0
+; LA32-NEXT:    st.h $a1, $a0, 0
 ; LA32-NEXT:    dbar 0
 ; LA32-NEXT:    ret
 ;
 ; LA64-LABEL: store_seq_cst_i16:
 ; LA64:       # %bb.0:
 ; LA64-NEXT:    dbar 0
-; LA64-NEXT:    st.h $a0, $a1, 0
+; LA64-NEXT:    st.h $a1, $a0, 0
 ; LA64-NEXT:    dbar 0
 ; LA64-NEXT:    ret
   store atomic i16 %v, ptr %ptr seq_cst, align 2
@@ -306,7 +306,7 @@
 ; LA32-LABEL: store_seq_cst_i32:
 ; LA32:       # %bb.0:
 ; LA32-NEXT:    dbar 0
-; LA32-NEXT:    st.w $a0, $a1, 0
+; LA32-NEXT:    st.w $a1, $a0, 0
 ; LA32-NEXT:    dbar 0
 ; LA32-NEXT:    ret
 ;
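
Side note, not part of the patch above: the second pattern in AtomicStPat (the AddLike/simm12 form) handles stores whose address is a base register plus a small constant offset. A minimal IR sketch of such a case follows, assuming a loongarch64 target; the function name and the expected assembly shape are illustrative assumptions, not lines taken from the test file.

define void @store_unordered_i32_offset(ptr %ptr, i32 signext %v) {
  ; The constant 4-byte offset from the getelementptr can fold into the
  ; 12-bit immediate operand of st.w via the AddLike-based pattern.
  %p = getelementptr inbounds i32, ptr %ptr, i64 1
  store atomic i32 %v, ptr %p unordered, align 4
  ret void
}
; Expected LA64 output shape with the corrected operand order (value, base, offset), roughly:
;   st.w $a1, $a0, 4
;   ret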