Index: lib/CodeGen/GlobalISel/LegalizerHelper.cpp
===================================================================
--- lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -243,11 +243,17 @@
     SmallVector<unsigned, 2> DstRegs;
     for (int i = 0; i < NumParts; ++i) {
       unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
-      unsigned SrcReg = MRI.createGenericVirtualRegister(NarrowPtrTy);
-      unsigned Offset = MRI.createGenericVirtualRegister(LLT::scalar(64));
+      unsigned SrcReg = 0;
+      unsigned Adjustment = i * NarrowSize / 8;
+
+      if (Adjustment != 0) {
+        unsigned Offset = MRI.createGenericVirtualRegister(LLT::scalar(64));
+        SrcReg = MRI.createGenericVirtualRegister(NarrowPtrTy);
+        MIRBuilder.buildConstant(Offset, Adjustment);
+        MIRBuilder.buildGEP(SrcReg, MI.getOperand(1).getReg(), Offset);
+      } else
+        SrcReg = MI.getOperand(1).getReg();
 
-      MIRBuilder.buildConstant(Offset, i * NarrowSize / 8);
-      MIRBuilder.buildGEP(SrcReg, MI.getOperand(1).getReg(), Offset);
       // TODO: This is conservatively correct, but we probably want to split the
       // memory operands in the future.
       MIRBuilder.buildLoad(DstReg, SrcReg, **MI.memoperands_begin());
@@ -270,10 +276,17 @@
     extractParts(MI.getOperand(0).getReg(), NarrowTy, NumParts, SrcRegs);
 
     for (int i = 0; i < NumParts; ++i) {
-      unsigned DstReg = MRI.createGenericVirtualRegister(NarrowPtrTy);
-      unsigned Offset = MRI.createGenericVirtualRegister(LLT::scalar(64));
-      MIRBuilder.buildConstant(Offset, i * NarrowSize / 8);
-      MIRBuilder.buildGEP(DstReg, MI.getOperand(1).getReg(), Offset);
+      unsigned DstReg = 0;
+      unsigned Adjustment = i * NarrowSize / 8;
+
+      if (Adjustment != 0) {
+        unsigned Offset = MRI.createGenericVirtualRegister(LLT::scalar(64));
+        DstReg = MRI.createGenericVirtualRegister(NarrowPtrTy);
+        MIRBuilder.buildConstant(Offset, Adjustment);
+        MIRBuilder.buildGEP(DstReg, MI.getOperand(1).getReg(), Offset);
+      } else
+        DstReg = MI.getOperand(1).getReg();
+
       // TODO: This is conservatively correct, but we probably want to split the
       // memory operands in the future.
       MIRBuilder.buildStore(SrcRegs[i], DstReg, **MI.memoperands_begin());
Index: test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
===================================================================
--- test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
+++ test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir
@@ -53,9 +53,7 @@
     ; CHECK: %7(<2 x s32>) = G_LOAD %0(p0) :: (load 8 from %ir.addr)
     %7(<2 x s32>) = G_LOAD %0(p0) :: (load 8 from %ir.addr)
 
-    ; CHECK: [[OFFSET0:%[0-9]+]](s64) = G_CONSTANT i64 0
-    ; CHECK: [[GEP0:%[0-9]+]](p0) = G_GEP %0, [[OFFSET0]](s64)
-    ; CHECK: [[LOAD0:%[0-9]+]](s64) = G_LOAD [[GEP0]](p0) :: (load 16 from %ir.addr)
+    ; CHECK: [[LOAD0:%[0-9]+]](s64) = G_LOAD %0(p0) :: (load 16 from %ir.addr)
     ; CHECK: [[OFFSET1:%[0-9]+]](s64) = G_CONSTANT i64 8
     ; CHECK: [[GEP1:%[0-9]+]](p0) = G_GEP %0, [[OFFSET1]](s64)
     ; CHECK: [[LOAD1:%[0-9]+]](s64) = G_LOAD [[GEP1]](p0) :: (load 16 from %ir.addr)
@@ -105,9 +103,7 @@
    ; CHECK: G_STORE %0(p0), %0(p0) :: (store 8 into %ir.addr)
     G_STORE %0(p0), %0(p0) :: (store 8 into %ir.addr)
 
-    ; CHECK: [[OFFSET0:%[0-9]+]](s64) = G_CONSTANT i64 0
-    ; CHECK: [[GEP0:%[0-9]+]](p0) = G_GEP %0, [[OFFSET0]](s64)
-    ; CHECK: G_STORE %5(s64), [[GEP0]](p0) :: (store 16 into %ir.addr)
+    ; CHECK: G_STORE %5(s64), %0(p0) :: (store 16 into %ir.addr)
     ; CHECK: [[OFFSET1:%[0-9]+]](s64) = G_CONSTANT i64 8
     ; CHECK: [[GEP1:%[0-9]+]](p0) = G_GEP %0, [[OFFSET1]](s64)
     ; CHECK: G_STORE %6(s64), [[GEP1]](p0) :: (store 16 into %ir.addr)
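
The rewritten loops reduce to one rule: each narrowed part is accessed at byte offset i * NarrowSize / 8, and a G_CONSTANT/G_GEP pair is emitted only when that offset is non-zero; the first part reuses the original pointer operand directly. Below is a minimal standalone sketch of that decision, not LLVM code: planNarrowedAccesses and NarrowedPart are hypothetical names used only to model the offset computation from the loops above, with plain C++ types in place of the MIRBuilder/MRI APIs.

// Standalone illustration only: models which narrowed parts need a G_GEP.
// planNarrowedAccesses and NarrowedPart are hypothetical, not LLVM API.
#include <cstdio>
#include <vector>

struct NarrowedPart {
  unsigned ByteOffset; // i * NarrowSize / 8
  bool NeedsGEP;       // true only for non-zero offsets after this patch
};

std::vector<NarrowedPart> planNarrowedAccesses(unsigned NumParts,
                                               unsigned NarrowSizeInBits) {
  std::vector<NarrowedPart> Parts;
  for (unsigned i = 0; i < NumParts; ++i) {
    unsigned Adjustment = i * NarrowSizeInBits / 8;
    Parts.push_back({Adjustment, Adjustment != 0});
  }
  return Parts;
}

int main() {
  // Mirrors the test above: a 128-bit access narrowed into two 64-bit parts.
  for (const NarrowedPart &P : planNarrowedAccesses(2, 64))
    std::printf("offset %u: %s\n", P.ByteOffset,
                P.NeedsGEP ? "G_CONSTANT + G_GEP" : "reuse original pointer");
}

For the two-part case in the test this prints "offset 0: reuse original pointer" and "offset 8: G_CONSTANT + G_GEP", which is exactly the shape the updated CHECK lines expect.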