diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -10080,7 +10080,8 @@
   assert(LD->isUnindexed() && "Loads should be unindexed at this point.");
 
   if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() &&
-      !Subtarget->isThumb1Only() && LD->isVolatile()) {
+      !Subtarget->isThumb1Only() && LD->isVolatile() &&
+      LD->getAlign() >= Align(Subtarget->hasV6Ops() ? 4 : 8)) {
     SDLoc dl(N);
     SDValue Result = DAG.getMemIntrinsicNode(
         ARMISD::LDRD, dl, DAG.getVTList({MVT::i32, MVT::i32, MVT::Other}),
@@ -10137,7 +10138,8 @@
   assert(ST->isUnindexed() && "Stores should be unindexed at this point.");
 
   if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() &&
-      !Subtarget->isThumb1Only() && ST->isVolatile()) {
+      !Subtarget->isThumb1Only() && ST->isVolatile() &&
+      ST->getAlign() >= Align(Subtarget->hasV6Ops() ? 4 : 8)) {
     SDNode *N = Op.getNode();
     SDLoc dl(N);
 
diff --git a/llvm/test/CodeGen/ARM/i64_volatile_load_store.ll b/llvm/test/CodeGen/ARM/i64_volatile_load_store.ll
--- a/llvm/test/CodeGen/ARM/i64_volatile_load_store.ll
+++ b/llvm/test/CodeGen/ARM/i64_volatile_load_store.ll
@@ -5,6 +5,9 @@
 @x = common dso_local global i64 0, align 8
 @y = common dso_local global i64 0, align 8
 
+@x_unaligned = common dso_local global i64 0, align 1
+@y_unaligned = common dso_local global i64 0, align 1
+
 define void @test() {
 entry:
 ; CHECK-LABEL: test:
@@ -29,6 +32,34 @@
   ret void
 }
 
+define void @test_unaligned() {
+entry:
+; CHECK-LABEL: test_unaligned:
+; CHECK-ARMV5TE: ldr [[ADDR0:r[0-9]+]]
+; CHECK-ARMV5TE-NEXT: ldr [[ADDR1:r[0-9]+]]
+; CHECK-ARMV5TE-NEXT: ldr [[R1:r[0-9]+]], [[[ADDR0]]]
+; CHECK-ARMV5TE-NEXT: ldr [[R0:r[0-9]+]], [[[ADDR0]], #4]
+; CHECK-ARMV5TE-NEXT: str [[R0]], [[[ADDR1]], #4]
+; CHECK-ARMV5TE-NEXT: str [[R1]], [[[ADDR1]]]
+; CHECK-T2: movw [[ADDR0:r[0-9]+]], :lower16:x_unaligned
+; CHECK-T2-NEXT: movw [[ADDR1:r[0-9]+]], :lower16:y_unaligned
+; CHECK-T2-NEXT: movt [[ADDR0]], :upper16:x_unaligned
+; CHECK-T2-NEXT: movt [[ADDR1]], :upper16:y_unaligned
+; CHECK-T2-NEXT: ldr [[R1:r[0-9]+]], [[[ADDR0]]]
+; CHECK-T2-NEXT: ldr [[R0:r[0-9]+]], [[[ADDR0]], #4]
+; CHECK-T2-NEXT: str [[R0]], [[[ADDR1]], #4]
+; CHECK-T2-NEXT: str [[R1]], [[[ADDR1]]]
+; CHECK-ARMV4T: ldr [[ADDR0:r[0-9]+]]
+; CHECK-ARMV4T-NEXT: ldr [[ADDR1:r[0-9]+]]
+; CHECK-ARMV4T-NEXT: ldr [[R1:r[0-9]+]], [[[ADDR0]]]
+; CHECK-ARMV4T-NEXT: ldr [[R0:r[0-9]+]], [[[ADDR0]], #4]
+; CHECK-ARMV4T-NEXT: str [[R0]], [[[ADDR1]], #4]
+; CHECK-ARMV4T-NEXT: str [[R1]], [[[ADDR1]]]
+  %0 = load volatile i64, ptr @x_unaligned, align 1
+  store volatile i64 %0, ptr @y_unaligned, align 1
+  ret void
+}
+
 define void @test_offset() {
 entry:
 ; CHECK-LABEL: test_offset: