Index: llvm/include/llvm/Analysis/AssumeBundleQueries.h
===================================================================
--- llvm/include/llvm/Analysis/AssumeBundleQueries.h
+++ llvm/include/llvm/Analysis/AssumeBundleQueries.h
@@ -16,6 +16,7 @@
 #include "llvm/IR/Attributes.h"
 #include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
 #include "llvm/ADT/DenseMap.h"
 
 namespace llvm {
@@ -47,6 +48,18 @@
   return hasAttributeInAssume(AssumeCI, IsOn,
                               Attribute::getNameFromAttrKind(Kind), ArgVal);
 }
+inline bool hasAttributeInAssume(IntrinsicInst &AssumeCI, Value *IsOn, StringRef AttrName,
+                                 uint64_t *ArgVal = nullptr) {
+  return hasAttributeInAssume(cast<CallInst>(AssumeCI), IsOn, AttrName, ArgVal);
+}
+
+inline bool hasAttributeInAssume(IntrinsicInst &AssumeCI, Value *IsOn,
+                                 Attribute::AttrKind Kind,
+                                 uint64_t *ArgVal = nullptr) {
+  return hasAttributeInAssume(cast<CallInst>(AssumeCI), IsOn, Kind,
+                              ArgVal);
+}
+
 template<> struct DenseMapInfo<Attribute::AttrKind> {
   static Attribute::AttrKind getEmptyKey() {
@@ -88,6 +101,9 @@
 /// String attributes are not inserted in the map.
 /// If the IR changes the map will be outdated.
 void fillMapFromAssume(CallInst &AssumeCI, RetainedKnowledgeMap &Result);
+inline void fillMapFromAssume(IntrinsicInst &AssumeCI, RetainedKnowledgeMap &Result) {
+  return fillMapFromAssume(cast<CallInst>(AssumeCI), Result);
+}
 
 /// Represent one information held inside an operand bundle of an llvm.assume.
 /// AttrKind is the property that holds.
@@ -143,6 +159,9 @@
 /// the argument to the call of llvm.assume may still be useful even if the
 /// function returned true.
 bool isAssumeWithEmptyBundle(CallInst &Assume);
+inline bool isAssumeWithEmptyBundle(IntrinsicInst &Assume) {
+  return isAssumeWithEmptyBundle(cast<CallInst>(Assume));
+}
 
 /// Return a valid Knowledge associated to the Use U if its Attribute kind is
 /// in AttrKinds.
@@ -170,6 +189,11 @@
 /// This is mostly for use in the assume builder.
 RetainedKnowledge getKnowledgeFromBundle(CallInst &Assume,
                                          const CallBase::BundleOpInfo &BOI);
+inline RetainedKnowledge getKnowledgeFromBundle(IntrinsicInst &Assume,
+                                                const CallBase::BundleOpInfo &BOI) {
+  return getKnowledgeFromBundle(cast<CallInst>(Assume), BOI);
+}
+
 } // namespace llvm
Index: llvm/include/llvm/Analysis/AssumptionCache.h
===================================================================
--- llvm/include/llvm/Analysis/AssumptionCache.h
+++ llvm/include/llvm/Analysis/AssumptionCache.h
@@ -19,6 +19,7 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseMapInfo.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/IR/ValueHandle.h"
 #include "llvm/Pass.h"
@@ -117,14 +118,24 @@
   /// The call passed in must be an instruction within this function and must
   /// not already be in the cache.
   void registerAssumption(CallInst *CI);
+  void registerAssumption(IntrinsicInst *II) {
+    registerAssumption(cast<CallInst>(II));
+  }
 
   /// Remove an \@llvm.assume intrinsic from this function's cache if it has
   /// been added to the cache earlier.
   void unregisterAssumption(CallInst *CI);
+  void unregisterAssumption(IntrinsicInst *II) {
+    unregisterAssumption(cast<CallInst>(II));
+  }
 
   /// Update the cache of values being affected by this assumption (i.e.
   /// the values about which this assumption provides information).
   void updateAffectedValues(CallInst *CI);
+  void updateAffectedValues(IntrinsicInst *II) {
+    updateAffectedValues(cast<CallInst>(II));
+  }
+
   /// Clear the cache of \@llvm.assume intrinsics for a function.
   ///
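Note: once IntrinsicInst derives from CallBase, an IntrinsicInst reference no longer converts implicitly to CallInst&, which is what the inline forwarding overloads above paper over for existing callers. A minimal sketch of the pattern, where processAssume() is a hypothetical stand-in for any API that still takes CallInst&:

#include "llvm/IR/IntrinsicInst.h"

void processAssume(llvm::CallInst &CI); // hypothetical CallInst-based API

void example(llvm::IntrinsicInst &Assume) {
  // processAssume(Assume);                          // no longer compiles under this patch
  processAssume(llvm::cast<llvm::CallInst>(Assume)); // explicit downcast, as the overloads do
}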
Index: llvm/include/llvm/IR/InstVisitor.h
===================================================================
--- llvm/include/llvm/IR/InstVisitor.h
+++ llvm/include/llvm/IR/InstVisitor.h
@@ -154,8 +154,8 @@
   // instruction to a special delegation helper.
 #define HANDLE_INST(NUM, OPCODE, CLASS) \
     RetTy visit##OPCODE(CLASS &I) { \
-      if (NUM == Instruction::Call) \
-        return delegateCallInst(I); \
+      if (NUM == Instruction::Call || NUM == Instruction::Invoke) \
+        return delegateCallBase(I); \
       else \
         DELEGATE(CLASS); \
     }
@@ -215,7 +215,12 @@
   RetTy visitVAStartInst(VAStartInst &I) { DELEGATE(IntrinsicInst); }
   RetTy visitVAEndInst(VAEndInst &I) { DELEGATE(IntrinsicInst); }
   RetTy visitVACopyInst(VACopyInst &I) { DELEGATE(IntrinsicInst); }
-  RetTy visitIntrinsicInst(IntrinsicInst &I) { DELEGATE(CallInst); }
+  RetTy visitIntrinsicInst(IntrinsicInst &I) {
+    if (isa<CallInst>(I))
+      return static_cast<SubClass *>(this)->visitCallInst(cast<CallInst>(I));
+    else
+      return static_cast<SubClass *>(this)->visitInvokeInst(cast<InvokeInst>(I));
+  }
   RetTy visitCallInst(CallInst &I) { DELEGATE(CallBase); }
   RetTy visitInvokeInst(InvokeInst &I) { DELEGATE(CallBase); }
   RetTy visitCallBrInst(CallBrInst &I) { DELEGATE(CallBase); }
@@ -280,7 +285,7 @@
 
 private:
   // Special helper function to delegate to CallInst subclass visitors.
-  RetTy delegateCallInst(CallInst &I) {
+  RetTy delegateCallBase(CallBase &I) {
     if (const Function *F = I.getCalledFunction()) {
       switch (F->getIntrinsicID()) {
       default: DELEGATE(IntrinsicInst);
@@ -296,13 +301,16 @@
       case Intrinsic::not_intrinsic: break;
       }
     }
-    DELEGATE(CallInst);
+    if (isa<CallInst>(I))
+      return static_cast<SubClass *>(this)->visitCallInst(cast<CallInst>(I));
+    else
+      return static_cast<SubClass *>(this)->visitInvokeInst(cast<InvokeInst>(I));
   }
 
   // An overload that will never actually be called, it is used only from dead
   // code in the dispatching from opcodes to instruction subclasses.
-  RetTy delegateCallInst(Instruction &I) {
-    llvm_unreachable("delegateCallInst called for non-CallInst");
+  RetTy delegateCallBase(Instruction &I) {
+    llvm_unreachable("delegateCallBase called for non-CallInst");
   }
 };
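Note: with the delegation change above, an intrinsic reached through an invoke is routed to visitIntrinsicInst (and from there to the more specific intrinsic visitors) instead of stopping at visitInvokeInst. A minimal sketch of a visitor that now sees both forms; the CountIntrinsics name is illustrative, not part of the patch:

#include "llvm/IR/InstVisitor.h"

// Counts every intrinsic in a function, whether it is called or invoked.
struct CountIntrinsics : llvm::InstVisitor<CountIntrinsics> {
  unsigned Num = 0;
  void visitIntrinsicInst(llvm::IntrinsicInst &II) { ++Num; }
};

// Usage: CountIntrinsics Counter; Counter.visit(F); // F is an llvm::Function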
Index: llvm/include/llvm/IR/IntrinsicInst.h
===================================================================
--- llvm/include/llvm/IR/IntrinsicInst.h
+++ llvm/include/llvm/IR/IntrinsicInst.h
@@ -13,10 +13,10 @@
 //     if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(Inst))
 //       ... MCI->getDest() ... MCI->getSource() ...
 //
-// All intrinsic function calls are instances of the call instruction, so these
-// are all subclasses of the CallInst class.  Note that none of these classes
-// has state or virtual methods, which is an important part of this gross/neat
-// hack working.
+// All intrinsic function calls are instances of the call or invoke instruction,
+// so these are all subclasses of the CallBase class.  Note that none of these
+// classes has state or virtual methods, which is an important part of this
+// gross/neat hack working.
 //
 //===----------------------------------------------------------------------===//
@@ -42,7 +42,7 @@
 /// A wrapper class for inspecting calls to intrinsic functions.
 /// This allows the standard isa/dyncast/cast functionality to work with calls
 /// to intrinsic functions.
-class IntrinsicInst : public CallInst {
+class IntrinsicInst : public CallBase {
 public:
   IntrinsicInst() = delete;
   IntrinsicInst(const IntrinsicInst &) = delete;
@@ -107,13 +107,13 @@
   }
 
   // Methods for support type inquiry through isa, cast, and dyn_cast:
-  static bool classof(const CallInst *I) {
+  static bool classof(const CallBase *I) {
     if (const Function *CF = I->getCalledFunction())
       return CF->isIntrinsic();
     return false;
   }
   static bool classof(const Value *V) {
-    return isa<CallInst>(V) && classof(cast<CallInst>(V));
+    return isa<CallBase>(V) && classof(cast<CallBase>(V));
   }
 };
Index: llvm/include/llvm/IR/Statepoint.h
===================================================================
--- llvm/include/llvm/IR/Statepoint.h
+++ llvm/include/llvm/IR/Statepoint.h
@@ -57,23 +57,19 @@
 class GCRelocateInst;
 class GCResultInst;
 
-/// Represents a gc.statepoint intrinsic call.  This extends directly from
-/// CallBase as the IntrinsicInst only supports calls and gc.statepoint is
-/// invokable.
-class GCStatepointInst : public CallBase {
+/// Represents a gc.statepoint intrinsic call.
+class GCStatepointInst : public IntrinsicInst {
 public:
   GCStatepointInst() = delete;
   GCStatepointInst(const GCStatepointInst &) = delete;
   GCStatepointInst &operator=(const GCStatepointInst &) = delete;
 
-  static bool classof(const CallBase *I) {
-    if (const Function *CF = I->getCalledFunction())
-      return CF->getIntrinsicID() == Intrinsic::experimental_gc_statepoint;
-    return false;
+  static bool classof(const IntrinsicInst *I) {
+    return I->getIntrinsicID() == Intrinsic::experimental_gc_statepoint;
   }
 
   static bool classof(const Value *V) {
-    return isa<CallBase>(V) && classof(cast<CallBase>(V));
+    return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
  }
 
   enum {
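Note: with GCStatepointInst below IntrinsicInst and IntrinsicInst below CallBase, the cast machinery composes across call and invoke sites alike, so an invoked gc.statepoint becomes reachable through the same dyn_cast chain as a called one. A sketch under that assumption (inspect() is a hypothetical helper):

#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Statepoint.h"

void inspect(llvm::Instruction *Inst) {
  // Under this patch an InvokeInst whose callee is an intrinsic satisfies
  // isa<IntrinsicInst>, so both forms land here.
  if (auto *II = llvm::dyn_cast<llvm::IntrinsicInst>(Inst))
    if (auto *SP = llvm::dyn_cast<llvm::GCStatepointInst>(II))
      (void)SP->getID(); // statepoint-specific accessors are available
}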
Index: llvm/lib/Analysis/AssumeBundleQueries.cpp
===================================================================
--- llvm/lib/Analysis/AssumeBundleQueries.cpp
+++ llvm/lib/Analysis/AssumeBundleQueries.cpp
@@ -59,12 +59,13 @@
     if (BOI.Tag->getKey() != AttrName)
       continue;
     if (IsOn && (BOI.End - BOI.Begin <= ABA_WasOn ||
-                 IsOn != getValueFromBundleOpInfo(Assume, BOI, ABA_WasOn)))
+                 IsOn != getValueFromBundleOpInfo(AssumeCI, BOI, ABA_WasOn)))
       continue;
     if (ArgVal) {
       assert(BOI.End - BOI.Begin > ABA_Argument);
       *ArgVal =
-          cast<ConstantInt>(getValueFromBundleOpInfo(Assume, BOI, ABA_Argument))
+          cast<ConstantInt>(getValueFromBundleOpInfo(AssumeCI, BOI,
+                                                     ABA_Argument))
               ->getZExtValue();
     }
     return true;
@@ -80,7 +81,7 @@
     std::pair<Value *, Attribute::AttrKind> Key{
         nullptr, Attribute::getAttrKindFromName(Bundles.Tag->getKey())};
     if (bundleHasArgument(Bundles, ABA_WasOn))
-      Key.first = getValueFromBundleOpInfo(Assume, Bundles, ABA_WasOn);
+      Key.first = getValueFromBundleOpInfo(AssumeCI, Bundles, ABA_WasOn);
 
     if (Key.first == nullptr && Key.second == Attribute::None)
       continue;
@@ -89,7 +90,7 @@
       continue;
     }
     auto *CI = dyn_cast<ConstantInt>(
-        getValueFromBundleOpInfo(Assume, Bundles, ABA_Argument));
+        getValueFromBundleOpInfo(AssumeCI, Bundles, ABA_Argument));
     if (!CI)
       continue;
     unsigned Val = CI->getZExtValue();
@@ -180,7 +181,7 @@
     if (!II || Elem.Index == AssumptionCache::ExprResultIdx)
       continue;
     if (RetainedKnowledge RK = getKnowledgeFromBundle(
-            *II, II->bundle_op_info_begin()[Elem.Index])) {
+            cast<CallInst>(*II), II->bundle_op_info_begin()[Elem.Index])) {
       if (V != RK.WasOn)
         continue;
       if (is_contained(AttrKinds, RK.AttrKind) &&
Index: llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -1337,15 +1337,15 @@
     return true;
   }
   case Intrinsic::experimental_stackmap:
-    return selectStackmap(II);
+    return selectStackmap(cast<CallInst>(II));
   case Intrinsic::experimental_patchpoint_void:
   case Intrinsic::experimental_patchpoint_i64:
-    return selectPatchpoint(II);
+    return selectPatchpoint(cast<CallInst>(II));
   case Intrinsic::xray_customevent:
-    return selectXRayCustomEvent(II);
+    return selectXRayCustomEvent(cast<CallInst>(II));
   case Intrinsic::xray_typedevent:
-    return selectXRayTypedEvent(II);
+    return selectXRayTypedEvent(cast<CallInst>(II));
   }
 
   return fastLowerIntrinsicCall(II);
Index: llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -738,7 +738,7 @@
 #ifndef NDEBUG
   for (auto *Reloc : SI.GCRelocates)
     if (Reloc->getParent() == SI.StatepointInstr->getParent())
-      StatepointLowering.scheduleRelocCall(*Reloc);
+      StatepointLowering.scheduleRelocCall(cast<CallInst>(*Reloc));
 #endif
 
   // Lower statepoint vmstate and gcstate arguments
@@ -1183,7 +1183,7 @@
   // statepoint. It would be too expensive to preserve validation info through
   // different basic blocks.
   if (Relocate.getStatepoint()->getParent() == Relocate.getParent())
-    StatepointLowering.relocCallVisited(Relocate);
+    StatepointLowering.relocCallVisited(cast<CallInst>(Relocate));
 
   auto *Ty = Relocate.getType()->getScalarType();
   if (auto IsManaged = GFI->getStrategy().isGCManagedPointer(Ty))
Index: llvm/lib/CodeGen/ShadowStackGCLowering.cpp
===================================================================
--- llvm/lib/CodeGen/ShadowStackGCLowering.cpp
+++ llvm/lib/CodeGen/ShadowStackGCLowering.cpp
@@ -244,7 +244,7 @@
         if (Function *F = CI->getCalledFunction())
           if (F->getIntrinsicID() == Intrinsic::gcroot) {
             std::pair<CallInst *, AllocaInst *> Pair = std::make_pair(
-                CI,
+                cast<CallInst>(CI),
                 cast<AllocaInst>(CI->getArgOperand(0)->stripPointerCasts()));
             if (IsNullValue(CI->getArgOperand(1)))
               Roots.push_back(Pair);
Index: llvm/lib/CodeGen/StackProtector.cpp
===================================================================
--- llvm/lib/CodeGen/StackProtector.cpp
+++ llvm/lib/CodeGen/StackProtector.cpp
@@ -255,7 +255,7 @@
     for (const Instruction &I : BB)
      if (const auto *II = dyn_cast<IntrinsicInst>(&I))
        if (II->getIntrinsicID() == Intrinsic::stackprotector)
-          return II;
+          return cast<CallInst>(II);
   return nullptr;
 }
Index: llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
===================================================================
--- llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -1143,7 +1143,7 @@
   bool atBegin(Parent->begin() == Me);
   if (!atBegin)
     --Me;
-  IL->LowerIntrinsicCall(&I);
+  IL->LowerIntrinsicCall(cast<CallInst>(&I));
 
   // Restore the CurInst pointer to the first instruction newly inserted, if
   // any.
Index: llvm/lib/Target/X86/X86FastISel.cpp
===================================================================
--- llvm/lib/Target/X86/X86FastISel.cpp
+++ llvm/lib/Target/X86/X86FastISel.cpp
@@ -2738,7 +2738,7 @@
     if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)
       return false;
 
-    return lowerCallTo(II, "memcpy", II->getNumArgOperands() - 1);
+    return lowerCallTo(cast<CallInst>(II), "memcpy", II->getNumArgOperands() - 1);
   }
   case Intrinsic::memset: {
     const MemSetInst *MSI = cast<MemSetInst>(II);
@@ -2753,7 +2753,7 @@
     if (MSI->getDestAddressSpace() > 255)
       return false;
 
-    return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
+    return lowerCallTo(cast<CallInst>(II), "memset", II->getNumArgOperands() - 1);
   }
   case Intrinsic::stackprotector: {
     // Emit code to store the stack guard onto the stack.
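Note: the cast<CallInst>(...) call sites above assume the intrinsic in hand is a call instruction; that holds here because only a handful of intrinsics (e.g. gc.statepoint and patchpoint) may be invoked at all, and these paths are only reached for calls. Where that guarantee is less clear, a more defensive shape would be the following sketch (lowerAsCall is hypothetical, not part of the patch):

#include "llvm/IR/IntrinsicInst.h"

bool lowerAsCall(llvm::CallInst *CI); // hypothetical lowering routine

bool lowerIntrinsic(llvm::IntrinsicInst *II) {
  // dyn_cast instead of cast: bail out rather than assert if the intrinsic
  // ever arrives as an invoke.
  if (auto *CI = llvm::dyn_cast<llvm::CallInst>(II))
    return lowerAsCall(CI);
  return false;
}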
Index: llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
===================================================================
--- llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -2110,7 +2110,7 @@
 
   Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
   Function *F = Intrinsic::getDeclaration(Or.getModule(), IID, Or.getType());
-  return IntrinsicInst::Create(F, {ShVal0, ShVal1, ShAmt});
+  return CallInst::Create(F, {ShVal0, ShVal1, ShAmt});
 }
 
 /// Attempt to combine or(zext(x),shl(zext(y),bw/2) concat packing patterns.
Index: llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
===================================================================
--- llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -608,7 +608,7 @@
     Y = Builder.CreateTrunc(ShVal1, DestTy);
   Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
   Function *F = Intrinsic::getDeclaration(Trunc.getModule(), IID, DestTy);
-  return IntrinsicInst::Create(F, {X, Y, NarrowShAmt});
+  return CallInst::Create(F, {X, Y, NarrowShAmt});
 }
 
 /// Try to narrow the width of math or bitwise logic instructions by pulling a
@@ -2702,7 +2702,7 @@
       Function *Bswap =
           Intrinsic::getDeclaration(CI.getModule(), Intrinsic::bswap, DestTy);
       Value *ScalarX = Builder.CreateBitCast(ShufOp0, DestTy);
-      return IntrinsicInst::Create(Bswap, { ScalarX });
+      return CallInst::Create(Bswap, { ScalarX });
     }
   }
Index: llvm/lib/Transforms/InstCombine/InstCombineInternal.h
===================================================================
--- llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -140,6 +140,7 @@
   Instruction *visitAddrSpaceCast(AddrSpaceCastInst &CI);
   Instruction *foldItoFPtoI(CastInst &FI);
   Instruction *visitSelectInst(SelectInst &SI);
+  Instruction *visitCallBase(CallBase &Call);
   Instruction *visitCallInst(CallInst &CI);
   Instruction *visitInvokeInst(InvokeInst &II);
   Instruction *visitCallBrInst(CallBrInst &CBI);
@@ -221,7 +222,6 @@
                                  Instruction &CtxI, Value *&OperationResult,
                                  Constant *&OverflowResult);
 
-  Instruction *visitCallBase(CallBase &Call);
   Instruction *tryOptimizeCall(CallInst *CI);
   bool transformConstExprCastCall(CallBase &Call);
   Instruction *transformCallThroughTrampoline(CallBase &Call,
Index: llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
===================================================================
--- llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -2383,7 +2383,7 @@
   Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
   Function *F = Intrinsic::getDeclaration(Sel.getModule(), IID, Sel.getType());
   ShAmt = Builder.CreateZExt(ShAmt, Sel.getType());
-  return IntrinsicInst::Create(F, { SV0, SV1, ShAmt });
+  return CallInst::Create(F, { SV0, SV1, ShAmt });
 }
 
 static Instruction *foldSelectToCopysign(SelectInst &Sel,
@@ -2424,7 +2424,7 @@
   Value *MagArg = TC->isNegative() ? FVal : TVal;
   Function *F = Intrinsic::getDeclaration(Sel.getModule(), Intrinsic::copysign,
                                           Sel.getType());
-  Instruction *CopySign = IntrinsicInst::Create(F, { MagArg, X });
+  Instruction *CopySign = CallInst::Create(F, { MagArg, X });
   CopySign->setFastMathFlags(Sel.getFastMathFlags());
   return CopySign;
 }
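Note: IntrinsicInst::Create in the removed lines resolved to the Create() factory inherited from CallInst; CallBase provides no such factory, so these call sites now name CallInst::Create directly. The result is still recognized by the IntrinsicInst interface, as in this sketch (makeFunnelShift is a hypothetical wrapper; F is assumed to be an intrinsic declaration such as llvm.fshl):

#include <cassert>
#include "llvm/IR/IntrinsicInst.h"

llvm::Instruction *makeFunnelShift(llvm::Function *F, llvm::Value *X,
                                   llvm::Value *Y, llvm::Value *ShAmt) {
  // Create a plain call; classification as an intrinsic follows the callee.
  llvm::CallInst *NewCall = llvm::CallInst::Create(F, {X, Y, ShAmt});
  assert(llvm::isa<llvm::IntrinsicInst>(NewCall) && "F is not an intrinsic");
  return NewCall;
}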
Index: llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
===================================================================
--- llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -4142,7 +4142,7 @@
   Value *VAArgTLSOriginCopy = nullptr;
   Value *VAArgOverflowSize = nullptr;
 
-  SmallVector<CallInst *, 16> VAStartInstrumentationList;
+  SmallVector<IntrinsicInst *, 16> VAStartInstrumentationList;
 
   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
 
@@ -4351,7 +4351,7 @@
     // Instrument va_start.
     // Copy va_list shadow from the backup copy of the TLS contents.
     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
-      CallInst *OrigInst = VAStartInstrumentationList[i];
+      auto *OrigInst = VAStartInstrumentationList[i];
       IRBuilder<> IRB(OrigInst->getNextNode());
       Value *VAListTag = OrigInst->getArgOperand(0);
 
@@ -4405,7 +4405,7 @@
   Value *VAArgTLSCopy = nullptr;
   Value *VAArgSize = nullptr;
 
-  SmallVector<CallInst *, 16> VAStartInstrumentationList;
+  SmallVector<IntrinsicInst *, 16> VAStartInstrumentationList;
 
   VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
@@ -4494,7 +4494,7 @@
     // Instrument va_start.
     // Copy va_list shadow from the backup copy of the TLS contents.
     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
-      CallInst *OrigInst = VAStartInstrumentationList[i];
+      auto *OrigInst = VAStartInstrumentationList[i];
       IRBuilder<> IRB(OrigInst->getNextNode());
       Value *VAListTag = OrigInst->getArgOperand(0);
       Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
@@ -4533,7 +4533,7 @@
   Value *VAArgTLSCopy = nullptr;
   Value *VAArgOverflowSize = nullptr;
 
-  SmallVector<CallInst *, 16> VAStartInstrumentationList;
+  SmallVector<IntrinsicInst *, 16> VAStartInstrumentationList;
 
   enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
 
@@ -4688,7 +4688,7 @@
     // Instrument va_start, copy va_list shadow from the backup copy of
     // the TLS contents.
     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
-      CallInst *OrigInst = VAStartInstrumentationList[i];
+      auto *OrigInst = VAStartInstrumentationList[i];
       IRBuilder<> IRB(OrigInst->getNextNode());
       Value *VAListTag = OrigInst->getArgOperand(0);
 
@@ -4783,7 +4783,7 @@
   Value *VAArgTLSCopy = nullptr;
   Value *VAArgSize = nullptr;
 
-  SmallVector<CallInst *, 16> VAStartInstrumentationList;
+  SmallVector<IntrinsicInst *, 16> VAStartInstrumentationList;
 
   VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
                         MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
@@ -4932,7 +4932,7 @@
     // Instrument va_start.
     // Copy va_list shadow from the backup copy of the TLS contents.
     for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
-      CallInst *OrigInst = VAStartInstrumentationList[i];
+      auto *OrigInst = VAStartInstrumentationList[i];
       IRBuilder<> IRB(OrigInst->getNextNode());
       Value *VAListTag = OrigInst->getArgOperand(0);
       Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
@@ -4972,7 +4972,7 @@
   Value *VAArgTLSOriginCopy = nullptr;
   Value *VAArgOverflowSize = nullptr;
 
-  SmallVector<CallInst *, 16> VAStartInstrumentationList;
+  SmallVector<IntrinsicInst *, 16> VAStartInstrumentationList;
 
   enum class ArgKind {
     GeneralPurpose,
@@ -5255,7 +5255,7 @@
     // Copy va_list shadow from the backup copy of the TLS contents.
     for (size_t VaStartNo = 0, VaStartNum = VAStartInstrumentationList.size();
          VaStartNo < VaStartNum; VaStartNo++) {
-      CallInst *OrigInst = VAStartInstrumentationList[VaStartNo];
+      auto *OrigInst = VAStartInstrumentationList[VaStartNo];
       IRBuilder<> IRB(OrigInst->getNextNode());
       Value *VAListTag = OrigInst->getArgOperand(0);
       copyRegSaveArea(IRB, VAListTag);
Index: llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
===================================================================
--- llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
+++ llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp
@@ -463,7 +463,7 @@
     OpBundle.push_back(OperandBundleDef{ss.str().c_str(), std::move(Args)});
   }
 
-  auto *Assume = cast<IntrinsicInst>(IntrinsicInst::Create(
+  auto *Assume = cast<IntrinsicInst>(CallInst::Create(
      FnAssume, ArrayRef<Value *>({ConstantInt::getTrue(C)}), OpBundle));
   Assume->insertBefore(&F->begin()->front());
   RetainedKnowledgeMap Map;
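Note: with Assume now typed as an IntrinsicInst, the inline forwarding overloads added earlier in this patch (and the AssumptionCache overloads) are what keep such call sites compiling. A small sketch of that usage, assuming an AssumptionCache for the enclosing function is at hand (registerIt is hypothetical, not part of the test):

#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/IntrinsicInst.h"

void registerIt(llvm::AssumptionCache &AC, llvm::IntrinsicInst *Assume) {
  AC.registerAssumption(Assume); // resolves to the new IntrinsicInst overload
}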