Index: llvm/include/llvm/CodeGen/TargetInstrInfo.h
===================================================================
--- llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -1235,6 +1235,10 @@
   /// Get the base operand and byte offset of an instruction that reads/writes
   /// memory.
+  /// It returns false if MI does not read/write memory.
+  /// It returns false if no base operand and offset were found.
+  /// It is not guaranteed to recognize base operands and offsets in all
+  /// cases.
   virtual bool getMemOperandWithOffset(const MachineInstr &MI,
                                        const MachineOperand *&BaseOp,
                                        int64_t &Offset,
Index: llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
===================================================================
--- llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -1982,6 +1982,9 @@
                                                const MachineOperand *&BaseOp,
                                                int64_t &Offset,
                                                const TargetRegisterInfo *TRI) const {
+  if (!LdSt.mayLoadOrStore())
+    return false;
+
   unsigned Width;
   return getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI);
 }
@@ -2026,9 +2029,8 @@
     Offset = LdSt.getOperand(3).getImm() * Scale;
   }
 
-  assert((BaseOp->isReg() || BaseOp->isFI()) &&
-         "getMemOperandWithOffset only supports base "
-         "operands of type register or frame index.");
+  if (!BaseOp->isReg() && !BaseOp->isFI())
+    return false;
 
   return true;
 }
Index: llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -260,6 +260,9 @@
                                           const MachineOperand *&BaseOp,
                                           int64_t &Offset,
                                           const TargetRegisterInfo *TRI) const {
+  if (!LdSt.mayLoadOrStore())
+    return false;
+
   unsigned Opc = LdSt.getOpcode();
 
   if (isDS(LdSt)) {
@@ -270,12 +273,11 @@
       BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
       // TODO: ds_consume/ds_append use M0 for the base address. Is it safe to
      // report that here?
-      if (!BaseOp)
+      if (!BaseOp || !BaseOp->isReg())
         return false;
 
       Offset = OffsetImm->getImm();
-      assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
-                                "operands of type register.");
+
       return true;
     }
@@ -307,9 +309,11 @@
         EltSize *= 64;
 
       BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
+      if (!BaseOp->isReg())
+        return false;
+
       Offset = EltSize * Offset0;
-      assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
-                                "operands of type register.");
+
       return true;
     }
@@ -346,12 +350,12 @@
         getNamedOperand(LdSt, AMDGPU::OpName::offset);
     BaseOp = AddrReg;
     Offset = OffsetImm->getImm();
-
     if (SOffset) // soffset can be an inline immediate.
       Offset += SOffset->getImm();
-    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
-                              "operands of type register.");
+    if (!BaseOp->isReg())
+      return false;
+
     return true;
   }
@@ -364,8 +368,9 @@
     const MachineOperand *SBaseReg = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
     BaseOp = SBaseReg;
     Offset = OffsetImm->getImm();
-    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
-                              "operands of type register.");
+    if (!BaseOp->isReg())
+      return false;
+
     return true;
   }
@@ -383,8 +388,8 @@
     }
 
     Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
-    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
-                              "operands of type register.");
+    if (!BaseOp->isReg())
+      return false;
     return true;
   }
Index: llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
===================================================================
--- llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -2945,10 +2945,7 @@
                                            const TargetRegisterInfo *TRI) const {
   unsigned AccessSize = 0;
   BaseOp = getBaseAndOffset(LdSt, Offset, AccessSize);
-  assert((!BaseOp || BaseOp->isReg()) &&
-         "getMemOperandWithOffset only supports base "
-         "operands of type register.");
-  return BaseOp != nullptr;
+  return BaseOp != nullptr && BaseOp->isReg();
 }
 
 /// Can these instructions execute at the same time in a bundle.
Index: llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
===================================================================
--- llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
+++ llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
@@ -788,8 +788,10 @@
   BaseOp = &LdSt.getOperand(1);
   Offset = LdSt.getOperand(2).getImm();
-  assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
-                            "operands of type register.");
+
+  if (!BaseOp->isReg())
+    return false;
+
   return true;
 }
Index: llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
===================================================================
--- llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -4263,12 +4263,10 @@
 // Return true if get the base operand, byte offset of an instruction and the
 // memory width. Width is the size of memory that is being loaded/stored.
 bool PPCInstrInfo::getMemOperandWithOffsetWidth(
-    const MachineInstr &LdSt,
-    const MachineOperand *&BaseReg,
-    int64_t &Offset,
-    unsigned &Width,
-    const TargetRegisterInfo *TRI) const {
-  assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
+    const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
+    unsigned &Width, const TargetRegisterInfo *TRI) const {
+  if (!LdSt.mayLoadOrStore())
+    return false;
 
   // Handle only loads/stores with base register followed by immediate offset.
   if (LdSt.getNumExplicitOperands() != 3)
Index: llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -548,7 +548,8 @@
 bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
     const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
     unsigned &Width, const TargetRegisterInfo *TRI) const {
-  assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
+  if (!LdSt.mayLoadOrStore())
+    return false;
 
   // Here we assume the standard RISC-V ISA, which uses a base+offset
   // addressing mode. You'll need to relax these conditions to support custom
Index: llvm/lib/Target/X86/X86InstrInfo.cpp
===================================================================
--- llvm/lib/Target/X86/X86InstrInfo.cpp
+++ llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -3218,8 +3218,9 @@
   Offset = DispMO.getImm();
 
-  assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
-                            "operands of type register.");
+  if (!BaseOp->isReg())
+    return false;
+
   return true;
 }
Index: llvm/test/CodeGen/AArch64/machine-sink-getmemoperandwithoffset.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/machine-sink-getmemoperandwithoffset.mir
@@ -0,0 +1,64 @@
+# RUN: llc -mtriple=aarch64-none-linux-gnu -run-pass machine-sink -o - %s | FileCheck %s
+--- |
+  define i8 @g() {
+  else.7:
+    br i1 undef, label %then.8, label %else.8, !make.implicit !0
+
+  then.8: ; preds = %else.8, %else.7
+    %merge = phi i8 [ 1, %else.7 ], [ 0, %else.8 ]
+    ret i8 %merge
+
+  else.8: ; preds = %else.7
+    %icmp.8 = icmp eq i64 undef, undef
+    br i1 %icmp.8, label %else.11, label %then.8
+
+  else.11: ; preds = %else.8
+    ret i8 undef
+  }
+
+  !0 = !{}
+...
+---
+name: g
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: gpr32all }
+  - { id: 1, class: gpr32all }
+  - { id: 2, class: gpr32 }
+  - { id: 3, class: gpr32 }
+  - { id: 4, class: gpr32all }
+  - { id: 5, class: gpr32 }
+  - { id: 6, class: gpr32all }
+body: |
+  ; Just check that the pass didn't crash/assert.
+  ; CHECK-LABEL: name: g
+  bb.0.else.7:
+    successors: %bb.1, %bb.2
+
+    %2:gpr32 = MOVi32imm 1
+    ; Sinking the COPY below used to hit an assert in getMemOperandWithOffset;
+    ; the hook now returns false instead of asserting when it is queried on a
+    ; non-memory instruction.
+    %1:gpr32all = COPY %2
+    %3:gpr32 = COPY $wzr
+    CBNZW %3, %bb.2
+    B %bb.1
+
+  bb.1.then.8:
+    %0:gpr32all = PHI %1, %bb.0, %4, %bb.2
+    $w0 = COPY %0
+    RET_ReallyLR implicit $w0
+
+  bb.2.else.8:
+    successors: %bb.3, %bb.1
+
+    %5:gpr32 = COPY $wzr
+    %4:gpr32all = COPY %5
+    CBNZW %5, %bb.1
+    B %bb.3
+
+  bb.3.else.11:
+    %6:gpr32all = IMPLICIT_DEF
+    $w0 = COPY %6
+    RET_ReallyLR implicit $w0
+...
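
Note (not part of the patch): a minimal caller-side sketch of the new contract, assuming the four-argument getMemOperandWithOffset signature declared in TargetInstrInfo.h above. The helper name hasSimpleBasePlusOffset is hypothetical. After this change the hook may be queried on any instruction (for example a COPY that MachineSink is about to move), so callers must check the result rather than assume the query only runs on memory operations.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

using namespace llvm;

// Hypothetical helper, for illustration only: returns true when MI is a
// memory access whose address the target decomposed into a register or
// frame-index base plus an immediate offset.
static bool hasSimpleBasePlusOffset(const MachineInstr &MI,
                                    const TargetInstrInfo &TII,
                                    const TargetRegisterInfo *TRI) {
  const MachineOperand *BaseOp = nullptr;
  int64_t Offset = 0;
  // With this patch the call below no longer asserts on non-memory
  // instructions or unrecognized addressing modes; it simply returns false.
  if (!TII.getMemOperandWithOffset(MI, BaseOp, Offset, TRI))
    return false;
  // The targets now return false instead of asserting when the base is not a
  // register or frame index, but a defensive check mirrors the new contract.
  return BaseOp->isReg() || BaseOp->isFI();
}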