Index: llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -1186,8 +1186,10 @@
     // store instruction writes and the stored value is not modified, we can
     // promote the load. Since we do not handle stores with pre-/post-index,
     // it's unnecessary to check if BaseReg is modified by the store itself.
+    // Also make sure the store instruction uses the 'reg + imm' addressing
+    // mode, like the load instruction.
     if (MI.mayStore() && isMatchingStore(LoadMI, MI) &&
-        BaseReg == getLdStBaseOp(MI).getReg() &&
+        BaseReg == getLdStBaseOp(MI).getReg() && getLdStOffsetOp(MI).isImm() &&
         isLdOffsetInRangeOfSt(LoadMI, MI, TII) &&
         ModifiedRegUnits.available(getLdStRegOp(MI).getReg())) {
       StoreI = MBBI;
Index: llvm/test/CodeGen/AArch64/arm64-ldst-opt-match.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/arm64-ldst-opt-match.ll
@@ -0,0 +1,30 @@
+; REQUIRES: asserts
+; RUN: llc < %s -mtriple=aarch64 -verify-machineinstrs | FileCheck %s
+
+@g = common dso_local global i32 0, align 4
+
+; This test checks that llc does not crash.
+; CHECK-LABEL: @test
+; CHECK: // %for.end
+; CHECK: str
+; CHECK: ldr
+define i32 @test() {
+entry:
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.inc, %entry
+  br label %for.end
+
+for.end:                                          ; preds = %for.cond
+  store i32 0, i32* @g, align 4
+  %0 = load i32, i32* undef, align 4
+  %tobool = icmp eq i32 %0, 0
+  br i1 %tobool, label %for.inc, label %exit
+
+for.inc:                                          ; preds = %for.end
+  br label %for.cond
+
+exit:                                             ; preds = %for.end
+  ret i32 0
+}
+
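
Note on the failure mode, inferred from the patch rather than stated in it: isLdOffsetInRangeOfSt compares the load and store offsets, which requires reading the store's offset operand as an immediate. The store to @g in the test presumably lowers to something like 'str wzr, [x8, :lo12:g]', whose offset operand is a symbolic reference to the global rather than an immediate, so the offset comparison would trip an assertion in asserts-enabled builds (hence REQUIRES: asserts); the new getLdStOffsetOp(MI).isImm() guard bails out first. Below is a minimal standalone C++ sketch of that operand-kind hazard, with Operand and safeOffsetCheck as hypothetical stand-ins (llvm::MachineOperand and the real range check are more involved):

    #include <cassert>
    #include <cstdint>
    #include <variant>

    // Hypothetical stand-in for llvm::MachineOperand: the offset slot of a
    // load/store can hold a plain immediate or a symbolic value such as a
    // :lo12: reference to a global.
    struct Operand {
      std::variant<int64_t, const char *> Storage; // immediate or symbol name
      bool isImm() const { return std::holds_alternative<int64_t>(Storage); }
      int64_t getImm() const {
        // Calling getImm() on a symbolic operand is the kind of misuse the
        // patch guards against; here it asserts, as in an asserts build.
        assert(isImm() && "offset operand is not an immediate");
        return std::get<int64_t>(Storage);
      }
    };

    // Simplified stand-in for the offset comparison: testing isImm() first,
    // as the patched condition does, keeps a symbolic store offset from ever
    // reaching getImm().
    bool safeOffsetCheck(const Operand &StOff, int64_t LdOff) {
      return StOff.isImm() && StOff.getImm() == LdOff;
    }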