Index: llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp
+++ llvm/trunk/lib/Target/AArch64/AArch64InstructionSelector.cpp
@@ -1740,7 +1740,14 @@

     auto &MemOp = **I.memoperands_begin();
     if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
-      LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
+      // For now we just support s8 acquire loads to be able to compile stack
+      // protector code.
+      if (MemOp.getOrdering() == AtomicOrdering::Acquire &&
+          MemOp.getSize() == 1) {
+        I.setDesc(TII.get(AArch64::LDARB));
+        return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
+      }
+      LLVM_DEBUG(dbgs() << "Atomic load/store not fully supported yet\n");
       return false;
     }
     unsigned MemSizeInBits = MemOp.getSize() * 8;
Index: llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir
===================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/select-atomic-load-store.mir
@@ -0,0 +1,37 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64-- -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+--- |
+  target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+  target triple = "aarch64"
+
+  define i8 @load_acq_i8(i8* %ptr) {
+    %v = load atomic i8, i8* %ptr acquire, align 8
+    ret i8 %v
+  }
+
+...
+---
+name:            load_acq_i8
+alignment:       2
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+liveins:
+  - { reg: '$x0' }
+machineFunctionInfo: {}
+body:             |
+  bb.1:
+    liveins: $x0
+
+    ; CHECK-LABEL: name: load_acq_i8
+    ; CHECK: liveins: $x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK: [[LDARB:%[0-9]+]]:gpr32 = LDARB [[COPY]] :: (load acquire 1 from %ir.ptr, align 8)
+    ; CHECK: $w0 = COPY [[LDARB]]
+    ; CHECK: RET_ReallyLR implicit $w0
+    %0:gpr(p0) = COPY $x0
+    %2:gpr(s32) = G_LOAD %0(p0) :: (load acquire 1 from %ir.ptr, align 8)
+    $w0 = COPY %2(s32)
+    RET_ReallyLR implicit $w0
+
+...
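
For context (not part of the patch), a minimal C++ sketch of the source-level
pattern this change lets GlobalISel select without falling back; the function
name is illustrative:

    #include <atomic>
    #include <cstdint>

    // A byte-sized acquire load: on AArch64 this becomes the acquire G_LOAD
    // seen in the MIR test above, which is now selected to LDARB.
    uint8_t load_acq_byte(const std::atomic<uint8_t> &v) {
      return v.load(std::memory_order_acquire);
    }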