Index: llvm/lib/Target/AArch64/AArch64InstrAtomics.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64InstrAtomics.td
+++ llvm/lib/Target/AArch64/AArch64InstrAtomics.td
@@ -271,22 +271,46 @@
 def stxr_1 : PatFrag<(ops node:$val, node:$ptr),
                      (int_aarch64_stxr node:$val, node:$ptr), [{
   return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
-}]>;
+}]> {
+  let GISelPredicateCode = [{
+    if (!MI.hasOneMemOperand())
+      return false;
+    return (*MI.memoperands_begin())->getSize() == 1;
+  }];
+}
 
 def stxr_2 : PatFrag<(ops node:$val, node:$ptr),
                      (int_aarch64_stxr node:$val, node:$ptr), [{
   return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
-}]>;
+}]> {
+  let GISelPredicateCode = [{
+    if (!MI.hasOneMemOperand())
+      return false;
+    return (*MI.memoperands_begin())->getSize() == 2;
+  }];
+}
 
 def stxr_4 : PatFrag<(ops node:$val, node:$ptr),
                      (int_aarch64_stxr node:$val, node:$ptr), [{
   return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
-}]>;
+}]> {
+  let GISelPredicateCode = [{
+    if (!MI.hasOneMemOperand())
+      return false;
+    return (*MI.memoperands_begin())->getSize() == 4;
+  }];
+}
 
 def stxr_8 : PatFrag<(ops node:$val, node:$ptr),
                      (int_aarch64_stxr node:$val, node:$ptr), [{
   return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
-}]>;
+}]> {
+  let GISelPredicateCode = [{
+    if (!MI.hasOneMemOperand())
+      return false;
+    return (*MI.memoperands_begin())->getSize() == 8;
+  }];
+}
 
 def : Pat<(stxr_1 GPR64:$val, GPR64sp:$addr),
           (STXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
Index: llvm/test/CodeGen/AArch64/GlobalISel/select-stx.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/GlobalISel/select-stx.mir
@@ -0,0 +1,120 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+
+--- |
+  define void @test_store_i8(i32, i8 %val, i8* %addr) { ret void }
+  define void @test_store_i16(i32, i16 %val, i16* %addr) { ret void }
+  define void @test_store_i32(i32, i32 %val, i32* %addr) { ret void }
+  define void @test_store_i64(i32, i64 %val, i64* %addr) { ret void }
+...
+---
+name:            test_store_i8
+alignment:       2
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body:             |
+  bb.0:
+    liveins: $w0, $w1, $x2
+
+    ; CHECK-LABEL: name: test_store_i8
+    ; CHECK: liveins: $w0, $w1, $x2
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY $w1
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
+    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_32
+    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[SUBREG_TO_REG]].sub_32
+    ; CHECK: early-clobber %5:gpr32 = STXRB [[COPY2]], [[COPY1]] :: (volatile store 1 into %ir.addr)
+    ; CHECK: $w0 = COPY %5
+    ; CHECK: RET_ReallyLR implicit $w0
+    %3:gpr(s32) = COPY $w1
+    %2:gpr(p0) = COPY $x2
+    %6:gpr(s64) = G_CONSTANT i64 255
+    %7:gpr(s64) = G_ANYEXT %3(s32)
+    %4:gpr(s64) = G_AND %7, %6
+    %5:gpr(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.stxr), %4(s64), %2(p0) :: (volatile store 1 into %ir.addr)
+    $w0 = COPY %5(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_store_i16
+alignment:       2
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body:             |
+  bb.0:
+    liveins: $w0, $w1, $x2
+
+    ; CHECK-LABEL: name: test_store_i16
+    ; CHECK: liveins: $w0, $w1, $x2
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32all = COPY $w1
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
+    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_32
+    ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY [[SUBREG_TO_REG]].sub_32
+    ; CHECK: early-clobber %5:gpr32 = STXRH [[COPY2]], [[COPY1]] :: (volatile store 2 into %ir.addr)
+    ; CHECK: $w0 = COPY %5
+    ; CHECK: RET_ReallyLR implicit $w0
+    %3:gpr(s32) = COPY $w1
+    %2:gpr(p0) = COPY $x2
+    %6:gpr(s64) = G_CONSTANT i64 65535
+    %7:gpr(s64) = G_ANYEXT %3(s32)
+    %4:gpr(s64) = G_AND %7, %6
+    %5:gpr(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.stxr), %4(s64), %2(p0) :: (volatile store 2 into %ir.addr)
+    $w0 = COPY %5(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_store_i32
+alignment:       2
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body:             |
+  bb.0:
+    liveins: $w0, $w1, $x2
+
+    ; CHECK-LABEL: name: test_store_i32
+    ; CHECK: liveins: $w0, $w1, $x2
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
+    ; CHECK: early-clobber %3:gpr32 = STXRW [[COPY]], [[COPY1]] :: (volatile store 4 into %ir.addr)
+    ; CHECK: $w0 = COPY %3
+    ; CHECK: RET_ReallyLR implicit $w0
+    %1:gpr(s32) = COPY $w1
+    %2:gpr(p0) = COPY $x2
+    %3:gpr(s64) = G_ZEXT %1(s32)
+    %4:gpr(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.stxr), %3(s64), %2(p0) :: (volatile store 4 into %ir.addr)
+    $w0 = COPY %4(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            test_store_i64
+alignment:       2
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+machineFunctionInfo: {}
+body:             |
+  bb.0:
+    liveins: $w0, $x1, $x2
+
+    ; CHECK-LABEL: name: test_store_i64
+    ; CHECK: liveins: $w0, $x1, $x2
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x2
+    ; CHECK: early-clobber %2:gpr32 = STXRX [[COPY]], [[COPY1]] :: (volatile store 8 into %ir.addr)
+    ; CHECK: $w0 = COPY %2
+    ; CHECK: RET_ReallyLR implicit $w0
+    %1:gpr(s64) = COPY $x1
+    %2:gpr(p0) = COPY $x2
+    %3:gpr(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.stxr), %1(s64), %2(p0) :: (volatile store 8 into %ir.addr)
+    $w0 = COPY %3(s32)
+    RET_ReallyLR implicit $w0
+
+...
Index: llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll
===================================================================
--- llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll
+++ llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll
@@ -91,39 +91,57 @@
 declare i64 @llvm.aarch64.ldxr.p0i32(i32*) nounwind
 declare i64 @llvm.aarch64.ldxr.p0i64(i64*) nounwind
 
+; FALLBACK-NOT: remark:{{.*}}test_store_i8
 define i32 @test_store_i8(i32, i8 %val, i8* %addr) {
 ; CHECK-LABEL: test_store_i8:
 ; CHECK-NOT: uxtb
 ; CHECK-NOT: and
 ; CHECK: stxrb w0, w1, [x2]
+; GISEL-LABEL: test_store_i8:
+; GISEL-NOT: uxtb
+; GISEL-NOT: and
+; GISEL: stxrb w0, w1, [x2]
   %extval = zext i8 %val to i64
   %res = call i32 @llvm.aarch64.stxr.p0i8(i64 %extval, i8* %addr)
   ret i32 %res
 }
 
+; FALLBACK-NOT: remark:{{.*}}test_store_i16
 define i32 @test_store_i16(i32, i16 %val, i16* %addr) {
 ; CHECK-LABEL: test_store_i16:
 ; CHECK-NOT: uxth
 ; CHECK-NOT: and
 ; CHECK: stxrh w0, w1, [x2]
+; GISEL-LABEL: test_store_i16:
+; GISEL-NOT: uxth
+; GISEL-NOT: and
+; GISEL: stxrh w0, w1, [x2]
   %extval = zext i16 %val to i64
   %res = call i32 @llvm.aarch64.stxr.p0i16(i64 %extval, i16* %addr)
   ret i32 %res
 }
 
+; FALLBACK-NOT: remark:{{.*}}test_store_i32
 define i32 @test_store_i32(i32, i32 %val, i32* %addr) {
 ; CHECK-LABEL: test_store_i32:
 ; CHECK-NOT: uxtw
 ; CHECK-NOT: and
 ; CHECK: stxr w0, w1, [x2]
+; GISEL-LABEL: test_store_i32:
+; GISEL-NOT: uxtw
+; GISEL-NOT: and
+; GISEL: stxr w0, w1, [x2]
   %extval = zext i32 %val to i64
   %res = call i32 @llvm.aarch64.stxr.p0i32(i64 %extval, i32* %addr)
   ret i32 %res
 }
 
+; FALLBACK-NOT: remark:{{.*}}test_store_i64
 define i32 @test_store_i64(i32, i64 %val, i64* %addr) {
 ; CHECK-LABEL: test_store_i64:
 ; CHECK: stxr w0, x1, [x2]
+; GISEL-LABEL: test_store_i64:
+; GISEL: stxr w0, x1, [x2]
   %res = call i32 @llvm.aarch64.stxr.p0i64(i64 %val, i64* %addr)
   ret i32 %res
 }
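For reviewers who want a standalone reproducer: the IR below is a minimal sketch distilled from the tests above, not part of the patch (the function and file names are invented). With the GISelPredicateCode additions in place, GlobalISel should select the store-exclusive directly instead of falling back to SelectionDAG:

  ; Build with: llc -mtriple=aarch64-- -global-isel -o - reproducer.ll
  ; Expected output for the body: stxrb w0, w1, [x2], with no extra uxtb/and,
  ; matching the GISEL check lines in arm64-ldxr-stxr.ll above.
  declare i32 @llvm.aarch64.stxr.p0i8(i64, i8*)

  define i32 @store_exclusive_byte(i32, i8 %val, i8* %addr) {
    %ext = zext i8 %val to i64
    %res = call i32 @llvm.aarch64.stxr.p0i8(i64 %ext, i8* %addr)
    ret i32 %res
  }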