Index: include/llvm/CodeGen/TargetLowering.h
===================================================================
--- include/llvm/CodeGen/TargetLowering.h
+++ include/llvm/CodeGen/TargetLowering.h
@@ -1440,6 +1440,9 @@
   /// require a more complex expansion.
   unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
 
+  /// Whether the target supports unaligned atomic operations.
+  bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }
+
   /// Whether AtomicExpandPass should automatically insert fences and reduce
   /// ordering for this atomic. This should be true for most architectures with
   /// weak memory ordering. Defaults to false.
@@ -1845,11 +1848,16 @@
     MaxAtomicSizeInBitsSupported = SizeInBits;
   }
 
-  // Sets the minimum cmpxchg or ll/sc size supported by the backend.
+  /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
   void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
     MinCmpXchgSizeInBits = SizeInBits;
   }
 
+  /// Sets whether unaligned atomic operations are supported.
+  void setSupportsUnalignedAtomics(bool UnalignedSupported) {
+    SupportsUnalignedAtomics = UnalignedSupported;
+  }
+
 public:
   //===--------------------------------------------------------------------===//
   // Addressing mode description hooks (used by LSR etc).
@@ -2331,6 +2339,9 @@
   /// backend supports.
   unsigned MinCmpXchgSizeInBits;
 
+  /// This indicates if the target supports unaligned atomic operations.
+  bool SupportsUnalignedAtomics;
+
   /// If set to a physical register, this specifies the register that
   /// llvm.savestack/llvm.restorestack should save and restore.
   unsigned StackPointerRegisterToSaveRestore;
Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4138,7 +4138,8 @@
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
 
-  if (I.getAlignment() < VT.getStoreSize())
+  if (!TLI.supportsUnalignedAtomics() &&
+      I.getAlignment() < VT.getStoreSize())
     report_fatal_error("Cannot generate unaligned atomic load");
 
   MachineMemOperand *MMO =
Index: lib/CodeGen/TargetLoweringBase.cpp
===================================================================
--- lib/CodeGen/TargetLoweringBase.cpp
+++ lib/CodeGen/TargetLoweringBase.cpp
@@ -520,6 +520,7 @@
   MaxAtomicSizeInBitsSupported = 1024;
 
   MinCmpXchgSizeInBits = 0;
+  SupportsUnalignedAtomics = false;
 
   std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames),
             nullptr);
Index: lib/Target/AVR/AVRISelLowering.cpp
===================================================================
--- lib/Target/AVR/AVRISelLowering.cpp
+++ lib/Target/AVR/AVRISelLowering.cpp
@@ -44,6 +44,7 @@
   setBooleanVectorContents(ZeroOrOneBooleanContent);
   setSchedulingPreference(Sched::RegPressure);
   setStackPointerRegisterToSaveRestore(AVR::SP);
+  setSupportsUnalignedAtomics(true);
 
   setOperationAction(ISD::GlobalAddress, MVT::i16, Custom);
   setOperationAction(ISD::BlockAddress, MVT::i16, Custom);
Index: test/CodeGen/AVR/unaligned-atomic-loads.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AVR/unaligned-atomic-loads.ll
@@ -0,0 +1,19 @@
+; RUN: llc -mattr=addsubiw < %s -march=avr | FileCheck %s
+
+; This verifies that the middle end can handle an unaligned atomic load.
+;
+; In the past, lowering an unaligned atomic load or store would always hit
+; a report_fatal_error inside the SelectionDAGBuilder.
+
+%AtomicI16 = type { %CellI16, [0 x i8] }
+%CellI16 = type { i16, [0 x i8] }
+
+; CHECK-LABEL: foo
+; CHECK: ret
+define void @foo(%AtomicI16* %self) {
+start:
+  %a = getelementptr inbounds %AtomicI16, %AtomicI16* %self, i16 0, i32 0, i32 0
+  load atomic i16, i16* %a seq_cst, align 1
+  ret void
+}
+
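
Note: a backend opts in from its TargetLowering constructor, as the AVRISelLowering.cpp hunk above does. The sketch below shows the same opt-in for another target; the MyTargetLowering class, its subtarget parameter, and the file name are hypothetical, and only setSupportsUnalignedAtomics() comes from this patch.

  // MyTargetISelLowering.cpp (hypothetical target, illustrative sketch only).
  MyTargetLowering::MyTargetLowering(const TargetMachine &TM,
                                     const MyTargetSubtarget &STI)
      : TargetLowering(TM) {
    // Opting in skips the "Cannot generate unaligned atomic load" fatal error
    // shown in the SelectionDAGBuilder hunk above when an atomic access is
    // less aligned than its store size; the target then lowers it itself.
    setSupportsUnalignedAtomics(true);
  }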