diff --git a/llvm/include/llvm/IR/Instruction.h b/llvm/include/llvm/IR/Instruction.h
--- a/llvm/include/llvm/IR/Instruction.h
+++ b/llvm/include/llvm/IR/Instruction.h
@@ -640,6 +640,9 @@
   /// Return true if this instruction has a volatile memory access.
   bool isVolatile() const LLVM_READONLY;
 
+  /// Return the type this instruction accesses in memory, if any.
+  Type *getAccessType() const LLVM_READONLY;
+
   /// Return true if this instruction may throw an exception.
   ///
   /// If IncludePhaseOneUnwind is set, this will also include cases where
diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp
--- a/llvm/lib/IR/Instruction.cpp
+++ b/llvm/lib/IR/Instruction.cpp
@@ -743,6 +743,43 @@
   }
 }
 
+Type *Instruction::getAccessType() const {
+  switch (getOpcode()) {
+  case Instruction::Store:
+    return getOperand(0)->getType();
+  case Instruction::Load:
+  case Instruction::AtomicRMW:
+    return getType();
+  case Instruction::AtomicCmpXchg:
+    // cmpxchg produces {T, i1}, so getType() is not the accessed type;
+    // use the type of the new value operand instead.
+    return cast<AtomicCmpXchgInst>(this)->getNewValOperand()->getType();
+  case Instruction::Call:
+  case Instruction::Invoke:
+    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(this)) {
+      switch (II->getIntrinsicID()) {
+      case Intrinsic::masked_load:
+      case Intrinsic::masked_gather:
+      case Intrinsic::masked_expandload:
+      case Intrinsic::vp_load:
+      case Intrinsic::vp_gather:
+      case Intrinsic::experimental_vp_strided_load:
+        return II->getType();
+      case Intrinsic::masked_store:
+      case Intrinsic::masked_scatter:
+      case Intrinsic::masked_compressstore:
+      case Intrinsic::vp_store:
+      case Intrinsic::vp_scatter:
+      case Intrinsic::experimental_vp_strided_store:
+        return II->getOperand(0)->getType();
+      default:
+        break;
+      }
+    }
+  }
+
+  return nullptr;
+}
+
 static bool canUnwindPastLandingPad(const LandingPadInst *LP,
                                     bool IncludePhaseOneUnwind) {
   // Because phase one unwinding skips cleanup landingpads, we effectively
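
A minimal usage sketch of the new API (the helper and the name printAccessTypes
are illustrative, not part of the patch; only Instruction::getAccessType()
comes from the change above):

  #include "llvm/IR/Function.h"
  #include "llvm/IR/InstIterator.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace llvm;

  // Print the type each memory-accessing instruction in F touches.
  // getAccessType() returns nullptr for instructions that do not access
  // memory (or whose accessed type is not modeled), so callers are
  // expected to null-check the result.
  static void printAccessTypes(Function &F) {
    for (Instruction &I : instructions(F))
      if (Type *AccessTy = I.getAccessType())
        errs() << I << "\n  accesses: " << *AccessTy << "\n";
  }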