diff --git a/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
new file mode 100644
--- /dev/null
+++ b/llvm/include/llvm/Transforms/Instrumentation/AddressSanitizerCommon.h
@@ -0,0 +1,49 @@
+//===--------- Definition of the AddressSanitizer class ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares common infrastructure for AddressSanitizer and
+// HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZERCOMMON_H
+#define LLVM_TRANSFORMS_INSTRUMENTATION_ADDRESSSANITIZERCOMMON_H
+
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Module.h"
+
+namespace llvm {
+
+class InterestingMemoryOperand {
+public:
+  Use *PtrUse;
+  bool IsWrite;
+  Type *Type;
+  uint64_t TypeSize;
+  unsigned Alignment;
+  // The mask Value, if we're looking at a masked load/store.
+  Value *MaybeMask;
+
+  InterestingMemoryOperand(Instruction *I, unsigned OperandNo, bool IsWrite,
+                           class Type *Type, unsigned Alignment,
+                           Value *MaybeMask = nullptr)
+      : IsWrite(IsWrite), Type(Type), Alignment(Alignment),
+        MaybeMask(MaybeMask) {
+    const DataLayout &DL = I->getModule()->getDataLayout();
+    TypeSize = DL.getTypeStoreSizeInBits(Type);
+    PtrUse = &I->getOperandUse(OperandNo);
+  }
+
+  Instruction *getInsn() { return cast<Instruction>(PtrUse->getUser()); }
+
+  Value *getPtr() { return PtrUse->get(); }
+};
+
+} // namespace llvm
+
+#endif
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -69,6 +69,7 @@
 #include "llvm/Support/ScopedPrinter.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Transforms/Instrumentation.h"
+#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
 #include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
@@ -612,16 +613,13 @@
   /// Check if we want (and can) handle this alloca.
   bool isInterestingAlloca(const AllocaInst &AI);
 
-  /// If it is an interesting memory access, return the PointerOperand
-  /// and set IsWrite/Alignment. Otherwise return nullptr.
-  /// MaybeMask is an output parameter for the mask Value, if we're looking at a
-  /// masked load/store.
-  Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
-                                   uint64_t *TypeSize, unsigned *Alignment,
-                                   Value **MaybeMask = nullptr);
+  bool ignoreAccess(Value *Ptr);
+  void getInterestingMemoryOperands(
+      Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
 
-  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis, Instruction *I,
-                     bool UseCalls, const DataLayout &DL);
+  void instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
+                     InterestingMemoryOperand &O, bool UseCalls,
+                     const DataLayout &DL);
   void instrumentPointerComparisonOrSubtraction(Instruction *I);
   void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
                          Value *Addr, uint32_t TypeSize, bool IsWrite,
@@ -1340,98 +1338,84 @@
   return IsInteresting;
 }
 
-Value *AddressSanitizer::isInterestingMemoryAccess(Instruction *I,
-                                                   bool *IsWrite,
-                                                   uint64_t *TypeSize,
-                                                   unsigned *Alignment,
-                                                   Value **MaybeMask) {
+bool AddressSanitizer::ignoreAccess(Value *Ptr) {
+  // Do not instrument accesses from different address spaces; we cannot deal
+  // with them.
+  Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
+  if (PtrTy->getPointerAddressSpace() != 0)
+    return true;
+
+  // Ignore swifterror addresses.
+  // swifterror memory addresses are mem2reg promoted by instruction
+  // selection. As such they cannot have regular uses like an instrumentation
+  // function and it makes no sense to track them as memory.
+  if (Ptr->isSwiftError())
+    return true;
+
+  // Treat memory accesses to promotable allocas as non-interesting since they
+  // will not cause memory violations. This greatly speeds up the instrumented
+  // executable at -O0.
+  if (auto AI = dyn_cast_or_null<AllocaInst>(Ptr))
+    if (ClSkipPromotableAllocas && !isInterestingAlloca(*AI))
+      return true;
+
+  return false;
+}
+
+void AddressSanitizer::getInterestingMemoryOperands(
+    Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
   // Skip memory accesses inserted by another instrumentation.
-  if (I->hasMetadata("nosanitize")) return nullptr;
+  if (I->hasMetadata("nosanitize"))
+    return;
 
   // Do not instrument the load fetching the dynamic shadow address.
   if (LocalDynamicShadow == I)
-    return nullptr;
+    return;
 
-  Value *PtrOperand = nullptr;
-  const DataLayout &DL = I->getModule()->getDataLayout();
   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
-    if (!ClInstrumentReads) return nullptr;
-    *IsWrite = false;
-    *TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
-    *Alignment = LI->getAlignment();
-    PtrOperand = LI->getPointerOperand();
+    if (!ClInstrumentReads || ignoreAccess(LI->getPointerOperand()))
+      return;
+    Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
+                             LI->getType(), LI->getAlignment());
   } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
-    if (!ClInstrumentWrites) return nullptr;
-    *IsWrite = true;
-    *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
-    *Alignment = SI->getAlignment();
-    PtrOperand = SI->getPointerOperand();
+    if (!ClInstrumentWrites || ignoreAccess(SI->getPointerOperand()))
+      return;
+    Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
+                             SI->getValueOperand()->getType(),
+                             SI->getAlignment());
   } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
-    if (!ClInstrumentAtomics) return nullptr;
-    *IsWrite = true;
-    *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
-    *Alignment = 0;
-    PtrOperand = RMW->getPointerOperand();
+    if (!ClInstrumentAtomics || ignoreAccess(RMW->getPointerOperand()))
+      return;
+    Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
+                             RMW->getValOperand()->getType(), 0);
   } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
-    if (!ClInstrumentAtomics) return nullptr;
-    *IsWrite = true;
-    *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
-    *Alignment = 0;
-    PtrOperand = XCHG->getPointerOperand();
+    if (!ClInstrumentAtomics || ignoreAccess(XCHG->getPointerOperand()))
+      return;
+    Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
+                             XCHG->getCompareOperand()->getType(), 0);
   } else if (auto CI = dyn_cast<CallInst>(I)) {
     auto *F = dyn_cast<Function>(CI->getCalledValue());
     if (F && (F->getName().startswith("llvm.masked.load.") ||
               F->getName().startswith("llvm.masked.store."))) {
-      unsigned OpOffset = 0;
-      if (F->getName().startswith("llvm.masked.store.")) {
-        if (!ClInstrumentWrites)
-          return nullptr;
-        // Masked store has an initial operand for the value.
-        OpOffset = 1;
-        *IsWrite = true;
-      } else {
-        if (!ClInstrumentReads)
-          return nullptr;
-        *IsWrite = false;
-      }
-
-      auto BasePtr = CI->getOperand(0 + OpOffset);
+      bool IsWrite = F->getName().startswith("llvm.masked.store.");
+      // Masked store has an initial operand for the value.
+      unsigned OpOffset = IsWrite ? 1 : 0;
+      if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
+        return;
+
+      auto BasePtr = CI->getOperand(OpOffset);
+      if (ignoreAccess(BasePtr))
+        return;
       auto Ty = cast<PointerType>(BasePtr->getType())->getElementType();
-      *TypeSize = DL.getTypeStoreSizeInBits(Ty);
+      unsigned Alignment = 1;
+      // Otherwise no alignment guarantees. We probably got Undef.
       if (auto AlignmentConstant =
               dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
-        *Alignment = (unsigned)AlignmentConstant->getZExtValue();
-      else
-        *Alignment = 1; // No alignment guarantees. We probably got Undef
-      if (MaybeMask)
-        *MaybeMask = CI->getOperand(2 + OpOffset);
-      PtrOperand = BasePtr;
+        Alignment = (unsigned)AlignmentConstant->getZExtValue();
+      Value *Mask = CI->getOperand(2 + OpOffset);
+      Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
     }
   }
-
-  if (PtrOperand) {
-    // Do not instrument acesses from different address spaces; we cannot deal
-    // with them.
-    Type *PtrTy = cast<PointerType>(PtrOperand->getType()->getScalarType());
-    if (PtrTy->getPointerAddressSpace() != 0)
-      return nullptr;
-
-    // Ignore swifterror addresses.
-    // swifterror memory addresses are mem2reg promoted by instruction
-    // selection. As such they cannot have regular uses like an instrumentation
-    // function and it makes no sense to track them as memory.
-    if (PtrOperand->isSwiftError())
-      return nullptr;
-  }
-
-  // Treat memory accesses to promotable allocas as non-interesting since they
-  // will not cause memory violations. This greatly speeds up the instrumented
-  // executable at -O0.
-  if (ClSkipPromotableAllocas)
-    if (auto AI = dyn_cast_or_null<AllocaInst>(PtrOperand))
-      return isInterestingAlloca(*AI) ? AI : nullptr;
-
-  return PtrOperand;
 }
 
 static bool isPointerOperand(Value *V) {
@@ -1546,15 +1530,9 @@
 }
 
 void AddressSanitizer::instrumentMop(ObjectSizeOffsetVisitor &ObjSizeVis,
-                                     Instruction *I, bool UseCalls,
+                                     InterestingMemoryOperand &O, bool UseCalls,
                                      const DataLayout &DL) {
-  bool IsWrite = false;
-  unsigned Alignment = 0;
-  uint64_t TypeSize = 0;
-  Value *MaybeMask = nullptr;
-  Value *Addr =
-      isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment, &MaybeMask);
-  assert(Addr);
+  Value *Addr = O.getPtr();
 
   // Optimization experiments.
   // The experiments can be used to evaluate potential optimizations that remove
@@ -1574,7 +1552,7 @@
     // dynamically initialized global is always valid.
     GlobalVariable *G = dyn_cast<GlobalVariable>(GetUnderlyingObject(Addr, DL));
     if (G && (!ClInitializers || GlobalIsLinkerInitialized(G)) &&
-        isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
+        isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
       NumOptimizedAccessesToGlobalVar++;
       return;
     }
@@ -1583,25 +1561,26 @@
   if (ClOpt && ClOptStack) {
     // A direct inbounds access to a stack variable is always valid.
     if (isa<AllocaInst>(GetUnderlyingObject(Addr, DL)) &&
-        isSafeAccess(ObjSizeVis, Addr, TypeSize)) {
+        isSafeAccess(ObjSizeVis, Addr, O.TypeSize)) {
       NumOptimizedAccessesToStackVar++;
       return;
     }
   }
 
-  if (IsWrite)
+  if (O.IsWrite)
     NumInstrumentedWrites++;
   else
     NumInstrumentedReads++;
 
   unsigned Granularity = 1 << Mapping.Scale;
-  if (MaybeMask) {
-    instrumentMaskedLoadOrStore(this, DL, IntptrTy, MaybeMask, I, Addr,
-                                Alignment, Granularity, TypeSize, IsWrite,
-                                nullptr, UseCalls, Exp);
+  if (O.MaybeMask) {
+    instrumentMaskedLoadOrStore(this, DL, IntptrTy, O.MaybeMask, O.getInsn(),
+                                Addr, O.Alignment, Granularity, O.TypeSize,
+                                O.IsWrite, nullptr, UseCalls, Exp);
   } else {
-    doInstrumentAddress(this, I, I, Addr, Alignment, Granularity, TypeSize,
-                        IsWrite, nullptr, UseCalls, Exp);
+    doInstrumentAddress(this, O.getInsn(), O.getInsn(), Addr, O.Alignment,
+                        Granularity, O.TypeSize, O.IsWrite, nullptr, UseCalls,
+                        Exp);
   }
 }
 
@@ -2651,15 +2630,12 @@
   // We want to instrument every address only once per basic block (unless there
   // are calls between uses).
   SmallPtrSet<Value *, 16> TempsToInstrument;
-  SmallVector<Instruction *, 16> ToInstrument;
+  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
   SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
   SmallVector<Instruction *, 8> NoReturnCalls;
   SmallVector<BasicBlock *, 16> AllBlocks;
   SmallVector<Instruction *, 8> PointerComparisonsOrSubtracts;
   int NumAllocas = 0;
-  bool IsWrite;
-  unsigned Alignment;
-  uint64_t TypeSize;
 
   // Fill the set of memory operations to instrument.
   for (auto &BB : F) {
@@ -2668,32 +2644,36 @@
     int NumInsnsPerBB = 0;
     for (auto &Inst : BB) {
       if (LooksLikeCodeInBug11395(&Inst)) return false;
-      Value *MaybeMask = nullptr;
-      if (Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
-                                                  &Alignment, &MaybeMask)) {
-        if (ClOpt && ClOptSameTemp) {
-          // If we have a mask, skip instrumentation if we've already
-          // instrumented the full object. But don't add to TempsToInstrument
-          // because we might get another load/store with a different mask.
-          if (MaybeMask) {
-            if (TempsToInstrument.count(Addr))
-              continue; // We've seen this (whole) temp in the current BB.
-          } else {
-            if (!TempsToInstrument.insert(Addr).second)
-              continue; // We've seen this temp in the current BB.
+      SmallVector<InterestingMemoryOperand, 1> InterestingOperands;
+      getInterestingMemoryOperands(&Inst, InterestingOperands);
+
+      if (!InterestingOperands.empty()) {
+        for (auto &Operand : InterestingOperands) {
+          if (ClOpt && ClOptSameTemp) {
+            Value *Ptr = Operand.getPtr();
+            // If we have a mask, skip instrumentation if we've already
+            // instrumented the full object. But don't add to TempsToInstrument
+            // because we might get another load/store with a different mask.
+            if (Operand.MaybeMask) {
+              if (TempsToInstrument.count(Ptr))
+                continue; // We've seen this (whole) temp in the current BB.
+            } else {
+              if (!TempsToInstrument.insert(Ptr).second)
+                continue; // We've seen this temp in the current BB.
+            }
           }
+          OperandsToInstrument.push_back(Operand);
+          NumInsnsPerBB++;
         }
       } else if (((ClInvalidPointerPairs || ClInvalidPointerCmp) &&
                   isInterestingPointerComparison(&Inst)) ||
                  ((ClInvalidPointerPairs || ClInvalidPointerSub) &&
                   isInterestingPointerSubtraction(&Inst))) {
         PointerComparisonsOrSubtracts.push_back(&Inst);
-        continue;
       } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst)) {
         // ok, take it.
         IntrinToInstrument.push_back(MI);
         NumInsnsPerBB++;
-        continue;
       } else {
         if (isa<AllocaInst>(Inst)) NumAllocas++;
         if (auto *CB = dyn_cast<CallBase>(&Inst)) {
@@ -2704,16 +2684,13 @@
         }
         if (CallInst *CI = dyn_cast<CallInst>(&Inst))
           maybeMarkSanitizerLibraryCallNoBuiltin(CI, TLI);
-        continue;
       }
-      ToInstrument.push_back(&Inst);
-      NumInsnsPerBB++;
       if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB)
         break;
     }
   }
 
   bool UseCalls = (ClInstrumentationWithCallsThreshold >= 0 &&
-                   ToInstrument.size() + IntrinToInstrument.size() >
+                   OperandsToInstrument.size() + IntrinToInstrument.size() >
                        (unsigned)ClInstrumentationWithCallsThreshold);
   const DataLayout &DL = F.getParent()->getDataLayout();
   ObjectSizeOpts ObjSizeOpts;
@@ -2722,12 +2699,11 @@
   // Instrument.
   int NumInstrumented = 0;
-  for (auto Inst : ToInstrument) {
-    if (!suppressInstrumentationSiteForDebug(NumInstrumented)) {
-      if (isInterestingMemoryAccess(Inst, &IsWrite, &TypeSize, &Alignment))
-        instrumentMop(ObjSizeVis, Inst, UseCalls,
-                      F.getParent()->getDataLayout());
-    }
+  for (auto &Operand : OperandsToInstrument) {
+    if (!suppressInstrumentationSiteForDebug(NumInstrumented))
+      instrumentMop(ObjSizeVis, Operand, UseCalls,
+                    F.getParent()->getDataLayout());
+    FunctionModified = true;
   }
   for (auto Inst : IntrinToInstrument) {
     if (!suppressInstrumentationSiteForDebug(NumInstrumented))
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -45,6 +45,7 @@
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Transforms/Instrumentation.h"
+#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/ModuleUtils.h"
 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
@@ -211,10 +212,10 @@
                                  unsigned AccessSizeIndex,
                                  Instruction *InsertBefore);
   void instrumentMemIntrinsic(MemIntrinsic *MI);
-  bool instrumentMemAccess(Instruction *I);
-  Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
-                                   uint64_t *TypeSize, unsigned *Alignment,
-                                   Value **MaybeMask);
+  bool instrumentMemAccess(InterestingMemoryOperand &O);
+  bool ignoreAccess(Value *Ptr);
+  void getInterestingMemoryOperands(
+      Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting);
 
   bool isInterestingAlloca(const AllocaInst &AI);
   bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
@@ -500,62 +501,55 @@
   }
 }
 
-Value *HWAddressSanitizer::isInterestingMemoryAccess(Instruction *I,
-                                                     bool *IsWrite,
-                                                     uint64_t *TypeSize,
-                                                     unsigned *Alignment,
-                                                     Value **MaybeMask) {
+bool HWAddressSanitizer::ignoreAccess(Value *Ptr) {
+  // Do not instrument accesses from different address spaces; we cannot deal
+  // with them.
+  Type *PtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
+  if (PtrTy->getPointerAddressSpace() != 0)
+    return true;
+
+  // Ignore swifterror addresses.
+  // swifterror memory addresses are mem2reg promoted by instruction
+  // selection. As such they cannot have regular uses like an instrumentation
+  // function and it makes no sense to track them as memory.
+  if (Ptr->isSwiftError())
+    return true;
+
+  return false;
+}
+
+void HWAddressSanitizer::getInterestingMemoryOperands(
+    Instruction *I, SmallVectorImpl<InterestingMemoryOperand> &Interesting) {
   // Skip memory accesses inserted by another instrumentation.
-  if (I->hasMetadata("nosanitize")) return nullptr;
+  if (I->hasMetadata("nosanitize"))
+    return;
 
   // Do not instrument the load fetching the dynamic shadow address.
   if (LocalDynamicShadow == I)
-    return nullptr;
+    return;
 
-  Value *PtrOperand = nullptr;
-  const DataLayout &DL = I->getModule()->getDataLayout();
   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
-    if (!ClInstrumentReads) return nullptr;
-    *IsWrite = false;
-    *TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
-    *Alignment = LI->getAlignment();
-    PtrOperand = LI->getPointerOperand();
+    if (!ClInstrumentReads || ignoreAccess(LI->getPointerOperand()))
+      return;
+    Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
+                             LI->getType(), LI->getAlignment());
   } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
-    if (!ClInstrumentWrites) return nullptr;
-    *IsWrite = true;
-    *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
-    *Alignment = SI->getAlignment();
-    PtrOperand = SI->getPointerOperand();
+    if (!ClInstrumentWrites || ignoreAccess(SI->getPointerOperand()))
+      return;
+    Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
+                             SI->getValueOperand()->getType(),
+                             SI->getAlignment());
   } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
-    if (!ClInstrumentAtomics) return nullptr;
-    *IsWrite = true;
-    *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
-    *Alignment = 0;
-    PtrOperand = RMW->getPointerOperand();
+    if (!ClInstrumentAtomics || ignoreAccess(RMW->getPointerOperand()))
+      return;
+    Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
+                             RMW->getValOperand()->getType(), 0);
   } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
-    if (!ClInstrumentAtomics) return nullptr;
-    *IsWrite = true;
-    *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
-    *Alignment = 0;
-    PtrOperand = XCHG->getPointerOperand();
-  }
-
-  if (PtrOperand) {
-    // Do not instrument accesses from different address spaces; we cannot deal
-    // with them.
-    Type *PtrTy = cast<PointerType>(PtrOperand->getType()->getScalarType());
-    if (PtrTy->getPointerAddressSpace() != 0)
-      return nullptr;
-
-    // Ignore swifterror addresses.
-    // swifterror memory addresses are mem2reg promoted by instruction
-    // selection. As such they cannot have regular uses like an instrumentation
-    // function and it makes no sense to track them as memory.
-    if (PtrOperand->isSwiftError())
-      return nullptr;
+    if (!ClInstrumentAtomics || ignoreAccess(XCHG->getPointerOperand()))
+      return;
+    Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
+                             XCHG->getCompareOperand()->getType(), 0);
   }
-
-  return PtrOperand;
 }
 
 static unsigned getPointerOperandIndex(Instruction *I) {
@@ -713,40 +707,32 @@
   MI->eraseFromParent();
 }
 
-bool HWAddressSanitizer::instrumentMemAccess(Instruction *I) {
-  LLVM_DEBUG(dbgs() << "Instrumenting: " << *I << "\n");
-  bool IsWrite = false;
-  unsigned Alignment = 0;
-  uint64_t TypeSize = 0;
-  Value *MaybeMask = nullptr;
+bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O) {
+  Value *Addr = O.getPtr();
 
-  Value *Addr =
-      isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment, &MaybeMask);
+  LLVM_DEBUG(dbgs() << "Instrumenting: " << *O.getInsn() << "\n");
 
-  if (!Addr)
-    return false;
-
-  if (MaybeMask)
+  if (O.MaybeMask)
     return false; //FIXME
 
-  IRBuilder<> IRB(I);
-  if (isPowerOf2_64(TypeSize) &&
-      (TypeSize / 8 <= (1UL << (kNumberOfAccessSizes - 1))) &&
-      (Alignment >= (1UL << Mapping.Scale) || Alignment == 0 ||
-       Alignment >= TypeSize / 8)) {
-    size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
+  IRBuilder<> IRB(O.getInsn());
+  if (isPowerOf2_64(O.TypeSize) &&
+      (O.TypeSize / 8 <= (1UL << (kNumberOfAccessSizes - 1))) &&
+      (O.Alignment >= (1UL << Mapping.Scale) || O.Alignment == 0 ||
+       O.Alignment >= O.TypeSize / 8)) {
+    size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeSize);
     if (ClInstrumentWithCalls) {
-      IRB.CreateCall(HwasanMemoryAccessCallback[IsWrite][AccessSizeIndex],
+      IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
                      IRB.CreatePointerCast(Addr, IntptrTy));
    } else {
-      instrumentMemAccessInline(Addr, IsWrite, AccessSizeIndex, I);
+      instrumentMemAccessInline(Addr, O.IsWrite, AccessSizeIndex, O.getInsn());
    }
  } else {
-    IRB.CreateCall(HwasanMemoryAccessCallbackSized[IsWrite],
+    IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite],
                    {IRB.CreatePointerCast(Addr, IntptrTy),
-                    ConstantInt::get(IntptrTy, TypeSize / 8)});
+                    ConstantInt::get(IntptrTy, O.TypeSize / 8)});
  }
-  untagPointerOperand(I, Addr);
+  untagPointerOperand(O.getInsn(), Addr);
 
   return true;
 }
@@ -1084,7 +1070,7 @@
 
   LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");
 
-  SmallVector<Instruction *, 16> ToInstrument;
+  SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
   SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
   SmallVector<AllocaInst *, 8> AllocasToInstrument;
   SmallVector<Instruction *, 8> RetVec;
@@ -1111,14 +1097,7 @@
       if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
        LandingPadVec.push_back(&Inst);
 
-      Value *MaybeMask = nullptr;
-      bool IsWrite;
-      unsigned Alignment;
-      uint64_t TypeSize;
-      Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
-                                              &Alignment, &MaybeMask);
-      if (Addr)
-        ToInstrument.push_back(&Inst);
+      getInterestingMemoryOperands(&Inst, OperandsToInstrument);
 
       if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&Inst))
        IntrinToInstrument.push_back(MI);
@@ -1137,7 +1116,7 @@
     F.setPersonalityFn(nullptr);
   }
 
-  if (AllocasToInstrument.empty() && ToInstrument.empty() &&
+  if (AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
       IntrinToInstrument.empty())
     return false;
 
@@ -1216,8 +1195,8 @@
     }
   }
 
-  for (auto Inst : ToInstrument)
-    Changed |= instrumentMemAccess(Inst);
+  for (auto &Operand : OperandsToInstrument)
+    Changed |= instrumentMemAccess(Operand);
 
   if (ClInstrumentMemIntrinsics && !IntrinToInstrument.empty()) {
     for (auto Inst : IntrinToInstrument)
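
For readers unfamiliar with the new bundle, below is a minimal standalone sketch of how an InterestingMemoryOperand is populated and queried. It is illustrative only and is not part of the patch: the tiny module, the "f"/"demo" names, and the main() driver are assumptions layered on top of the header added above, targeting the same pre-Align LLVM revision (hence getAlignment()).

// Standalone sketch (hypothetical driver, assumes the LLVM revision this
// patch applies to). Builds a one-store function, then wraps the store's
// pointer operand the same way getInterestingMemoryOperands() does for a
// StoreInst.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);

  // void f(i32* %p) { store i32 42, i32* %p; ret void }
  auto *FnTy = FunctionType::get(Type::getVoidTy(Ctx),
                                 {Type::getInt32PtrTy(Ctx)},
                                 /*isVarArg=*/false);
  Function *F = Function::Create(FnTy, Function::ExternalLinkage, "f", M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> IRB(BB);
  Value *Ptr = &*F->arg_begin();
  StoreInst *SI = IRB.CreateStore(IRB.getInt32(42), Ptr);
  IRB.CreateRetVoid();

  // The same constructor call the StoreInst case in the patch performs.
  InterestingMemoryOperand O(SI, SI->getPointerOperandIndex(),
                             /*IsWrite=*/true,
                             SI->getValueOperand()->getType(),
                             SI->getAlignment());

  // Only a Use* is stored, yet both ends are recoverable from it.
  outs() << "insn:" << *O.getInsn() << "\n"; // the store instruction
  outs() << "ptr: " << *O.getPtr() << "\n";  // the i32* argument
  outs() << "IsWrite=" << (O.IsWrite ? "1" : "0")
         << " TypeSize=" << O.TypeSize << " bits\n";
  return 0;
}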
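Two properties of the refactoring are worth noting. First, it removes a double scan: previously runOnFunction called isInterestingMemoryAccess once to decide whether to queue an instruction, and instrumentMop/instrumentMemAccess called it again to rediscover IsWrite, TypeSize, and Alignment; now the analysis runs once in getInterestingMemoryOperands and its results travel with the operand. Second, storing a Use * rather than the pointer Value itself pins both the user instruction and the exact operand slot, which is why getInsn() and getPtr() can both be recovered from the single PtrUse field, and it leaves room for instructions with several interesting operands to contribute more than one entry to the vector.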