Index: cfe/trunk/include/clang/StaticAnalyzer/Checkers/Checkers.td
===================================================================
--- cfe/trunk/include/clang/StaticAnalyzer/Checkers/Checkers.td
+++ cfe/trunk/include/clang/StaticAnalyzer/Checkers/Checkers.td
@@ -72,6 +72,8 @@
 def LocalizabilityAlpha : Package<"localizability">, InPackage;
 def LocalizabilityOptIn : Package<"localizability">, InPackage;
 
+def MPI : Package<"mpi">, InPackage;
+
 def LLVM : Package<"llvm">;
 def Debug : Package<"debug">;
 
@@ -577,6 +579,12 @@
   DescFile<"LocalizationChecker.cpp">;
 }
 
+let ParentPackage = MPI in {
+  def MPIChecker : Checker<"MPI-Checker">,
+    HelpText<"Checks MPI code">,
+    DescFile<"MPIChecker.cpp">;
+}
+
 //===----------------------------------------------------------------------===//
 // Checkers for LLVM development.
 //===----------------------------------------------------------------------===//

Index: cfe/trunk/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
===================================================================
--- cfe/trunk/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
+++ cfe/trunk/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
@@ -150,6 +150,28 @@
   template <typename RegionTy> const RegionTy *getAs() const;
 
   virtual bool isBoundable() const { return false; }
+
+  /// Get a descriptive name for the memory region. The name is obtained from
+  /// the variable/field declaration retrieved from the memory region.
+  /// Regions that point to an element of an array are returned as: "arr[0]".
+  /// Regions that point to a struct are returned as: "st.var".
+  ///
+  /// \param UseQuotes Set if the name should be quoted.
+  ///
+  /// \returns variable name for the memory region
+  std::string getDescriptiveName(bool UseQuotes = true) const;
+
+  /// Retrieve the source range from the memory region. The range retrieval
+  /// is based on the decl obtained from the memory region.
+  /// For a VarRegion the range of the base region is returned.
+  /// For a FieldRegion the range of the field is returned.
+  /// If no declaration is found, an empty source range is returned.
+  /// The client is responsible for checking if the returned range is valid.
+  ///
+  /// \returns source range for the declaration retrieved from the memory region
+  clang::SourceRange sourceRange() const;
 };
 
 /// MemSpaceRegion - A memory region that represents a "memory space";

Index: cfe/trunk/lib/StaticAnalyzer/Checkers/CMakeLists.txt
===================================================================
--- cfe/trunk/lib/StaticAnalyzer/Checkers/CMakeLists.txt
+++ cfe/trunk/lib/StaticAnalyzer/Checkers/CMakeLists.txt
@@ -41,6 +41,9 @@
   MallocChecker.cpp
   MallocOverflowSecurityChecker.cpp
   MallocSizeofChecker.cpp
+  MPI-Checker/MPIBugReporter.cpp
+  MPI-Checker/MPIChecker.cpp
+  MPI-Checker/MPIFunctionClassifier.cpp
   NSAutoreleasePoolChecker.cpp
   NSErrorChecker.cpp
   NoReturnFunctionChecker.cpp
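The two new MemRegion helpers are consumed from a checker's bug-reporting path. A minimal sketch of how a client would use them; `describeRegion` is a hypothetical helper name, and the calls mirror the usage in MPIBugReporter.cpp further down in this patch:

```cpp
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"

// Hypothetical helper, illustrative only.
void describeRegion(const clang::ento::MemRegion *MR,
                    clang::ento::BugReport &Report) {
  // Yields e.g. "'rs.req[0][1]'" for an element of a field array;
  // pass false to omit the surrounding quotes.
  std::string Name = MR->getDescriptiveName(/*UseQuotes=*/true);

  // Highlight the declaration of the underlying variable or field.
  // The range is invalid if no declaration could be recovered.
  clang::SourceRange Range = MR->sourceRange();
  if (Range.isValid())
    Report.addRange(Range);
  (void)Name; // would normally feed into the diagnostic message
}
```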
Index: cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
===================================================================
--- cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
+++ cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
@@ -0,0 +1,110 @@
+//===-- MPIBugReporter.h - bug reporter -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines prefabricated reports which are emitted in case of
+/// MPI-related bugs detected by path-sensitive analysis.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPIBUGREPORTER_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPIBUGREPORTER_H
+
+#include "MPIFunctionClassifier.h"
+#include "MPITypes.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+
+namespace clang {
+namespace ento {
+namespace mpi {
+
+class MPIBugReporter {
+public:
+  MPIBugReporter(BugReporter &BR, const CheckerBase &CB,
+                 const MPIFunctionClassifier &FC)
+      : BReporter{BR} {
+    UnmatchedWaitBugType.reset(new BugType(&CB, "Unmatched wait", MPIError));
+    DoubleNonblockingBugType.reset(
+        new BugType(&CB, "Double nonblocking", MPIError));
+    MissingWaitBugType.reset(new BugType(&CB, "Missing wait", MPIError));
+  }
+
+  /// Report duplicate request use by nonblocking calls without an
+  /// intermediate wait.
+  ///
+  /// \param MPICallEvent MPI call that caused the double nonblocking
+  /// \param Req request that was used by two nonblocking calls in sequence
+  /// \param RequestRegion memory region of the request
+  /// \param ExplNode node in the graph the bug appeared at
+  void reportDoubleNonblocking(const CallEvent &MPICallEvent,
+                               const Request &Req,
+                               const MemRegion *const RequestRegion,
+                               const ExplodedNode *const ExplNode) const;
+
+  /// Report a missing wait for a nonblocking call. A missing wait report
+  /// is emitted if a nonblocking call is not matched in the scope of a
+  /// function.
+  ///
+  /// \param Req request that is not matched by a wait
+  /// \param RequestRegion memory region of the request
+  /// \param ExplNode node in the graph the bug appeared at
+  void reportMissingWait(const Request &Req,
+                         const MemRegion *const RequestRegion,
+                         const ExplodedNode *const ExplNode) const;
+
+  /// Report a wait on a request that has not been used at all before.
+  ///
+  /// \param CE wait call that uses the request
+  /// \param RequestRegion memory region of the request
+  /// \param ExplNode node in the graph the bug appeared at
+  void reportUnmatchedWait(const CallEvent &CE,
+                           const MemRegion *const RequestRegion,
+                           const ExplodedNode *const ExplNode) const;
+
+private:
+  const std::string MPIError{"MPI Error"};
+
+  // path-sensitive bug types
+  std::unique_ptr<BugType> UnmatchedWaitBugType;
+  std::unique_ptr<BugType> MissingWaitBugType;
+  std::unique_ptr<BugType> DoubleNonblockingBugType;
+
+  BugReporter &BReporter;
+
+  /// Bug visitor class to find the node where the request region was
+  /// previously used in order to include it into the BugReport path.
+  class RequestNodeVisitor : public BugReporterVisitorImpl<RequestNodeVisitor> {
+  public:
+    RequestNodeVisitor(const MemRegion *const MemoryRegion,
+                       const std::string &ErrText)
+        : RequestRegion(MemoryRegion), ErrorText{ErrText} {}
+
+    void Profile(llvm::FoldingSetNodeID &ID) const override {
+      static int X = 0;
+      ID.AddPointer(&X);
+      ID.AddPointer(RequestRegion);
+    }
+
+    PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+                                   const ExplodedNode *PrevN,
+                                   BugReporterContext &BRC,
+                                   BugReport &BR) override;
+
+  private:
+    const MemRegion *const RequestRegion;
+    bool IsNodeFound{false};
+    std::string ErrorText;
+  };
+};
+
+} // end of namespace: mpi
+} // end of namespace: ento
+} // end of namespace: clang
+
+#endif
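To illustrate how these reports surface, here is a double-nonblocking defect together with the diagnostics the visitor attaches; the function name is hypothetical, but the calls and the diagnostic text mirror the mpicheckernotes.cpp test at the end of this patch:

```cpp
// Illustrative only: with -analyzer-output=text, RequestNodeVisitor attaches
// a note at the first nonblocking call, and the warning is emitted at the
// call that reuses the request.
void doubleNonblockingExample(double buf) {
  MPI_Request sendReq;
  MPI_Isend(&buf, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &sendReq);
  // note: Request is previously used by nonblocking call here.
  MPI_Irecv(&buf, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &sendReq);
  // warning: Double nonblocking on request 'sendReq'.
  MPI_Wait(&sendReq, MPI_STATUS_IGNORE);
}
```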
"}; + + auto Report = + llvm::make_unique(*UnmatchedWaitBugType, ErrorText, ExplNode); + + Report->addRange(CE.getSourceRange()); + SourceRange Range = RequestRegion->sourceRange(); + if (Range.isValid()) + Report->addRange(Range); + + BReporter.emitReport(std::move(Report)); +} + +PathDiagnosticPiece *MPIBugReporter::RequestNodeVisitor::VisitNode( + const ExplodedNode *N, const ExplodedNode *PrevN, BugReporterContext &BRC, + BugReport &BR) { + + if (IsNodeFound) + return nullptr; + + const Request *const Req = N->getState()->get(RequestRegion); + const Request *const PrevReq = + PrevN->getState()->get(RequestRegion); + + // Check if request was previously unused or in a different state. + if ((Req && !PrevReq) || (Req->CurrentState != PrevReq->CurrentState)) { + IsNodeFound = true; + + ProgramPoint P = PrevN->getLocation(); + PathDiagnosticLocation L = + PathDiagnosticLocation::create(P, BRC.getSourceManager()); + + return new PathDiagnosticEventPiece(L, ErrorText); + } + + return nullptr; +} + +} // end of namespace: mpi +} // end of namespace: ento +} // end of namespace: clang Index: cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h =================================================================== --- cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h +++ cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h @@ -0,0 +1,110 @@ +//===-- MPIChecker.h - Verify MPI API usage- --------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file defines the main class of MPI-Checker which serves as an entry +/// point. It is created once for each translation unit analysed. +/// The checker defines path-sensitive checks, to verify correct usage of the +/// MPI API. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPICHECKER_H +#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPICHECKER_H + +#include "MPIBugReporter.h" +#include "MPIFunctionClassifier.h" +#include "MPITypes.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h" + +namespace clang { +namespace ento { +namespace mpi { + +class MPIChecker : public Checker { +public: + // path-sensitive callbacks + void checkPreCall(const CallEvent &CE, CheckerContext &Ctx) const { + dynamicInit(Ctx); + checkUnmatchedWaits(CE, Ctx); + checkDoubleNonblocking(CE, Ctx); + } + + void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &Ctx) const { + dynamicInit(Ctx); + checkMissingWaits(SymReaper, Ctx); + } + + void dynamicInit(CheckerContext &Ctx) const { + if (IsInitialized) + return; + const_cast &>(FuncClassifier) + .reset(new MPIFunctionClassifier{Ctx.getAnalysisManager()}); + + const_cast &>(BReporter).reset( + new MPIBugReporter{Ctx.getBugReporter(), *this, *FuncClassifier}); + + const_cast(IsInitialized) = true; + } + + /// Checks if a request is used by nonblocking calls multiple times + /// in sequence without intermediate wait. The check contains a guard, + /// in order to only inspect nonblocking functions. 
Index: cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h
===================================================================
--- cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h
+++ cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h
@@ -0,0 +1,110 @@
+//===-- MPIChecker.h - Verify MPI API usage ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the main class of MPI-Checker, which serves as an entry
+/// point. It is created once for each translation unit analysed.
+/// The checker defines path-sensitive checks to verify correct usage of the
+/// MPI API.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPICHECKER_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPICHECKER_H
+
+#include "MPIBugReporter.h"
+#include "MPIFunctionClassifier.h"
+#include "MPITypes.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+namespace clang {
+namespace ento {
+namespace mpi {
+
+class MPIChecker : public Checker<check::PreCall, check::DeadSymbols> {
+public:
+  // path-sensitive callbacks
+  void checkPreCall(const CallEvent &CE, CheckerContext &Ctx) const {
+    dynamicInit(Ctx);
+    checkUnmatchedWaits(CE, Ctx);
+    checkDoubleNonblocking(CE, Ctx);
+  }
+
+  void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &Ctx) const {
+    dynamicInit(Ctx);
+    checkMissingWaits(SymReaper, Ctx);
+  }
+
+  void dynamicInit(CheckerContext &Ctx) const {
+    if (IsInitialized)
+      return;
+    const_cast<std::unique_ptr<MPIFunctionClassifier> &>(FuncClassifier)
+        .reset(new MPIFunctionClassifier{Ctx.getAnalysisManager()});
+
+    const_cast<std::unique_ptr<MPIBugReporter> &>(BReporter).reset(
+        new MPIBugReporter{Ctx.getBugReporter(), *this, *FuncClassifier});
+
+    const_cast<bool &>(IsInitialized) = true;
+  }
+
+  /// Checks if a request is used by nonblocking calls multiple times
+  /// in sequence without an intermediate wait. The check contains a guard,
+  /// in order to only inspect nonblocking functions.
+  ///
+  /// \param PreCallEvent MPI call to verify
+  void checkDoubleNonblocking(const clang::ento::CallEvent &PreCallEvent,
+                              clang::ento::CheckerContext &Ctx) const;
+
+  /// Checks if a request is used by a wait multiple times in sequence without
+  /// an intermediate nonblocking call, or if the request used by the wait
+  /// function was not used at all before. The check contains a guard,
+  /// in order to only inspect wait functions.
+  ///
+  /// \param PreCallEvent MPI call to verify
+  void checkUnmatchedWaits(const clang::ento::CallEvent &PreCallEvent,
+                           clang::ento::CheckerContext &Ctx) const;
+
+  /// Check if a nonblocking call is not matched by a wait.
+  /// If a memory region is not alive and the last function using the
+  /// request was a nonblocking call, this is rated as a missing wait.
+  void checkMissingWaits(clang::ento::SymbolReaper &SymReaper,
+                         clang::ento::CheckerContext &Ctx) const;
+
+private:
+  /// Collects all memory regions of a request (array) used by a wait
+  /// function. If the wait function uses a single request, this is a single
+  /// region. For wait functions using multiple requests, multiple regions
+  /// representing elements in the array are collected.
+  ///
+  /// \param ReqRegions vector the regions get pushed into
+  /// \param MR top most region to iterate
+  /// \param CE MPI wait call using the request(s)
+  void allRegionsUsedByWait(
+      llvm::SmallVector<const MemRegion *, 2> &ReqRegions,
+      const clang::ento::MemRegion *const MR, const clang::ento::CallEvent &CE,
+      clang::ento::CheckerContext &Ctx) const;
+
+  /// Returns the memory region used by a wait function.
+  /// Distinguishes between MPI_Wait and MPI_Waitall.
+  ///
+  /// \param CE MPI wait call
+  const clang::ento::MemRegion *
+  topRegionUsedByWait(const clang::ento::CallEvent &CE) const;
+
+  const std::unique_ptr<MPIFunctionClassifier> FuncClassifier;
+  const std::unique_ptr<MPIBugReporter> BReporter;
+  bool IsInitialized{false};
+};
+
+} // end of namespace: mpi
+} // end of namespace: ento
+} // end of namespace: clang
+
+#endif
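The `dynamicInit` pattern above deserves a note: checker callbacks are `const`, but the classifier and bug reporter need objects that only exist once analysis runs, hence the `const_cast` lazy initialization. A stripped-down sketch of the idiom, independent of clang; all names here are hypothetical:

```cpp
#include <memory>

struct Classifier {};

class LazyChecker {
public:
  // Callbacks are const, so deferred setup must cast constness away.
  void callback() const { dynamicInit(); /* ... use FuncClassifier ... */ }

private:
  void dynamicInit() const {
    if (IsInitialized)
      return;
    // Deferred construction: the data needed to build the member only
    // becomes available after registration, once analysis has started.
    // (Well-defined only if the LazyChecker object itself is not const.)
    const_cast<std::unique_ptr<Classifier> &>(FuncClassifier)
        .reset(new Classifier{});
    const_cast<bool &>(IsInitialized) = true;
  }

  const std::unique_ptr<Classifier> FuncClassifier;
  bool IsInitialized{false};
};
```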
Index: cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
===================================================================
--- cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
+++ cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
@@ -0,0 +1,190 @@
+//===-- MPIChecker.cpp - Checker Entry Point Class --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the main class of MPI-Checker, which serves as an entry
+/// point. It is created once for each translation unit analysed.
+/// The checker defines path-sensitive checks to verify correct usage of the
+/// MPI API.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MPIChecker.h"
+#include "../ClangSACheckers.h"
+
+namespace clang {
+namespace ento {
+namespace mpi {
+
+void MPIChecker::checkDoubleNonblocking(const CallEvent &PreCallEvent,
+                                        CheckerContext &Ctx) const {
+  if (!FuncClassifier->isNonBlockingType(PreCallEvent.getCalleeIdentifier())) {
+    return;
+  }
+  const MemRegion *const MR =
+      PreCallEvent.getArgSVal(PreCallEvent.getNumArgs() - 1).getAsRegion();
+  if (!MR)
+    return;
+  const ElementRegion *const ER = dyn_cast<ElementRegion>(MR);
+
+  // The region must be typed, in order to reason about it.
+  if (!isa<TypedRegion>(MR) || (ER && !isa<TypedRegion>(ER->getSuperRegion())))
+    return;
+
+  ProgramStateRef State = Ctx.getState();
+  const Request *const Req = State->get<RequestMap>(MR);
+
+  // double nonblocking detected
+  if (Req && Req->CurrentState == Request::State::Nonblocking) {
+    ExplodedNode *ErrorNode = Ctx.generateNonFatalErrorNode();
+    BReporter->reportDoubleNonblocking(PreCallEvent, *Req, MR, ErrorNode);
+    Ctx.addTransition(ErrorNode->getState(), ErrorNode);
+  }
+  // no error
+  else {
+    State = State->set<RequestMap>(MR, Request::State::Nonblocking);
+    Ctx.addTransition(State);
+  }
+}
+
+void MPIChecker::checkUnmatchedWaits(const CallEvent &PreCallEvent,
+                                     CheckerContext &Ctx) const {
+  if (!FuncClassifier->isWaitType(PreCallEvent.getCalleeIdentifier()))
+    return;
+  const MemRegion *const MR = topRegionUsedByWait(PreCallEvent);
+  if (!MR)
+    return;
+  const ElementRegion *const ER = dyn_cast<ElementRegion>(MR);
+
+  // The region must be typed, in order to reason about it.
+  if (!isa<TypedRegion>(MR) || (ER && !isa<TypedRegion>(ER->getSuperRegion())))
+    return;
+
+  llvm::SmallVector<const MemRegion *, 2> ReqRegions;
+  allRegionsUsedByWait(ReqRegions, MR, PreCallEvent, Ctx);
+  if (ReqRegions.empty())
+    return;
+
+  ProgramStateRef State = Ctx.getState();
+  static CheckerProgramPointTag Tag("MPI-Checker", "UnmatchedWait");
+  ExplodedNode *ErrorNode{nullptr};
+
+  // Check all request regions used by the wait function.
+  for (const auto &ReqRegion : ReqRegions) {
+    const Request *const Req = State->get<RequestMap>(ReqRegion);
+    State = State->set<RequestMap>(ReqRegion, Request::State::Wait);
+    if (!Req) {
+      if (!ErrorNode) {
+        ErrorNode = Ctx.generateNonFatalErrorNode(State, &Tag);
+        State = ErrorNode->getState();
+      }
+      // A wait has no matching nonblocking call.
+      BReporter->reportUnmatchedWait(PreCallEvent, ReqRegion, ErrorNode);
+    }
+  }
+
+  if (!ErrorNode) {
+    Ctx.addTransition(State);
+  } else {
+    Ctx.addTransition(State, ErrorNode);
+  }
+}
+
+void MPIChecker::checkMissingWaits(SymbolReaper &SymReaper,
+                                   CheckerContext &Ctx) const {
+  if (!SymReaper.hasDeadSymbols())
+    return;
+
+  ProgramStateRef State = Ctx.getState();
+  const auto &Requests = State->get<RequestMap>();
+  if (Requests.isEmpty())
+    return;
+
+  static CheckerProgramPointTag Tag("MPI-Checker", "MissingWait");
+  ExplodedNode *ErrorNode{nullptr};
+
+  auto ReqMap = State->get<RequestMap>();
+  for (const auto &Req : ReqMap) {
+    if (!SymReaper.isLiveRegion(Req.first)) {
+      if (Req.second.CurrentState == Request::State::Nonblocking) {
+
+        if (!ErrorNode) {
+          ErrorNode = Ctx.generateNonFatalErrorNode(State, &Tag);
+          State = ErrorNode->getState();
+        }
+        BReporter->reportMissingWait(Req.second, Req.first, ErrorNode);
+      }
+      State = State->remove<RequestMap>(Req.first);
+    }
+  }
+
+  // Transition to update the state regarding removed requests.
+  if (!ErrorNode) {
+    Ctx.addTransition(State);
+  } else {
+    Ctx.addTransition(State, ErrorNode);
+  }
+}
+
+const MemRegion *MPIChecker::topRegionUsedByWait(const CallEvent &CE) const {
+
+  if (FuncClassifier->isMPI_Wait(CE.getCalleeIdentifier())) {
+    return CE.getArgSVal(0).getAsRegion();
+  } else if (FuncClassifier->isMPI_Waitall(CE.getCalleeIdentifier())) {
+    return CE.getArgSVal(1).getAsRegion();
+  } else {
+    return (const MemRegion *)nullptr;
+  }
+}
+
+void MPIChecker::allRegionsUsedByWait(
+    llvm::SmallVector<const MemRegion *, 2> &ReqRegions,
+    const MemRegion *const MR, const CallEvent &CE,
+    CheckerContext &Ctx) const {
+
+  MemRegionManager *const RegionManager = MR->getMemRegionManager();
+
+  if (FuncClassifier->isMPI_Waitall(CE.getCalleeIdentifier())) {
+    const MemRegion *SuperRegion{nullptr};
+    if (const ElementRegion *const ER = MR->getAs<ElementRegion>()) {
+      SuperRegion = ER->getSuperRegion();
+    }
+
+    // A single request is passed to MPI_Waitall.
+    if (!SuperRegion) {
+      ReqRegions.push_back(MR);
+      return;
+    }
+
+    const auto &Size = Ctx.getStoreManager().getSizeInElements(
+        Ctx.getState(), SuperRegion,
+        CE.getArgExpr(1)->getType()->getPointeeType());
+    const llvm::APSInt &ArrSize = Size.getAs<nonloc::ConcreteInt>()->getValue();
+
+    for (size_t i = 0; i < ArrSize; ++i) {
+      const NonLoc Idx = Ctx.getSValBuilder().makeArrayIndex(i);
+
+      const ElementRegion *const ER = RegionManager->getElementRegion(
+          CE.getArgExpr(1)->getType()->getPointeeType(), Idx, SuperRegion,
+          Ctx.getASTContext());
+
+      ReqRegions.push_back(ER->getAs<MemRegion>());
+    }
+  } else if (FuncClassifier->isMPI_Wait(CE.getCalleeIdentifier())) {
+    ReqRegions.push_back(MR);
+  }
+}
+
+} // end of namespace: mpi
+} // end of namespace: ento
+} // end of namespace: clang
+
+// Registers the checker for static analysis.
+void clang::ento::registerMPIChecker(CheckerManager &MGR) {
+  MGR.registerChecker<MPIChecker>();
+}
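The effect of `allRegionsUsedByWait` for `MPI_Waitall` is that every element region of the request array is tracked and checked individually. A sketch of the pattern it catches, mirroring the `missingNonBlockingWaitall` test further down in this patch (function name hypothetical):

```cpp
// Illustrative only: a wait over four slots expects four preceding
// nonblocking calls. Leaving req[2] unused yields:
//   warning: Request 'req[2]' has no matching nonblocking call.
void waitallExample(double buf) {
  MPI_Request req[4];
  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD, &req[0]);
  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD, &req[1]);
  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD, &req[3]);
  MPI_Waitall(4, req, MPI_STATUSES_IGNORE);
}
```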
Index: cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.h
===================================================================
--- cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.h
+++ cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.h
@@ -0,0 +1,97 @@
+//===-- MPIFunctionClassifier.h - classifies MPI functions ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines functionality to identify and classify MPI functions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPIFUNCTIONCLASSIFIER_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPIFUNCTIONCLASSIFIER_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+namespace clang {
+namespace ento {
+namespace mpi {
+
+class MPIFunctionClassifier {
+public:
+  MPIFunctionClassifier(AnalysisManager &AM) { identifierInit(AM); }
+
+  // general identifiers
+  bool isMPIType(const IdentifierInfo *const IdentInfo) const;
+  bool isNonBlockingType(const IdentifierInfo *const IdentInfo) const;
+
+  // point-to-point identifiers
+  bool isPointToPointType(const IdentifierInfo *const IdentInfo) const;
+
+  // collective identifiers
+  bool isCollectiveType(const IdentifierInfo *const IdentInfo) const;
+  bool isCollToColl(const IdentifierInfo *const IdentInfo) const;
+  bool isScatterType(const IdentifierInfo *const IdentInfo) const;
+  bool isGatherType(const IdentifierInfo *const IdentInfo) const;
+  bool isAllgatherType(const IdentifierInfo *const IdentInfo) const;
+  bool isAlltoallType(const IdentifierInfo *const IdentInfo) const;
+  bool isReduceType(const IdentifierInfo *const IdentInfo) const;
+  bool isBcastType(const IdentifierInfo *const IdentInfo) const;
+
+  // additional identifiers
+  bool isMPI_Wait(const IdentifierInfo *const IdentInfo) const;
+  bool isMPI_Waitall(const IdentifierInfo *const IdentInfo) const;
+  bool isWaitType(const IdentifierInfo *const IdentInfo) const;
+
+private:
+  // Initializes function identifiers to recognize them during analysis.
+  void identifierInit(AnalysisManager &AM);
+  void initPointToPointIdentifiers(AnalysisManager &AM);
+  void initCollectiveIdentifiers(AnalysisManager &AM);
+  void initAdditionalIdentifiers(AnalysisManager &AM);
+
+  // The containers are used to enable classification of MPI functions during
+  // analysis.
+  llvm::SmallVector<IdentifierInfo *, 12> MPINonBlockingTypes;
+
+  llvm::SmallVector<IdentifierInfo *, 10> MPIPointToPointTypes;
+  llvm::SmallVector<IdentifierInfo *, 16> MPICollectiveTypes;
+
+  llvm::SmallVector<IdentifierInfo *, 4> MPIPointToCollTypes;
+  llvm::SmallVector<IdentifierInfo *, 4> MPICollToPointTypes;
+  llvm::SmallVector<IdentifierInfo *, 6> MPICollToCollTypes;
+
+  llvm::SmallVector<IdentifierInfo *, 32> MPIType;
+
+  // point-to-point functions
+  IdentifierInfo *IdentInfo_MPI_Send{nullptr}, *IdentInfo_MPI_Isend{nullptr},
+      *IdentInfo_MPI_Ssend{nullptr}, *IdentInfo_MPI_Issend{nullptr},
+      *IdentInfo_MPI_Bsend{nullptr}, *IdentInfo_MPI_Ibsend{nullptr},
+      *IdentInfo_MPI_Rsend{nullptr}, *IdentInfo_MPI_Irsend{nullptr},
+      *IdentInfo_MPI_Recv{nullptr}, *IdentInfo_MPI_Irecv{nullptr};
+
+  // collective functions
+  IdentifierInfo *IdentInfo_MPI_Scatter{nullptr},
+      *IdentInfo_MPI_Iscatter{nullptr}, *IdentInfo_MPI_Gather{nullptr},
+      *IdentInfo_MPI_Igather{nullptr}, *IdentInfo_MPI_Allgather{nullptr},
+      *IdentInfo_MPI_Iallgather{nullptr}, *IdentInfo_MPI_Bcast{nullptr},
+      *IdentInfo_MPI_Ibcast{nullptr}, *IdentInfo_MPI_Reduce{nullptr},
+      *IdentInfo_MPI_Ireduce{nullptr}, *IdentInfo_MPI_Allreduce{nullptr},
+      *IdentInfo_MPI_Iallreduce{nullptr}, *IdentInfo_MPI_Alltoall{nullptr},
+      *IdentInfo_MPI_Ialltoall{nullptr}, *IdentInfo_MPI_Barrier{nullptr};
+
+  // additional functions
+  IdentifierInfo *IdentInfo_MPI_Comm_rank{nullptr},
+      *IdentInfo_MPI_Comm_size{nullptr}, *IdentInfo_MPI_Wait{nullptr},
+      *IdentInfo_MPI_Waitall{nullptr};
+};
+
+} // end of namespace: mpi
+} // end of namespace: ento
+} // end of namespace: clang
+
+#endif
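A sketch of how the checker consults the classifier during `checkPreCall` (it mirrors the dispatch in MPIChecker.cpp above; `inspectCall` is a hypothetical free function, assuming the in-tree analyzer headers):

```cpp
// The callee's IdentifierInfo is matched against the identifiers that
// identifierInit() registered up front.
void inspectCall(const clang::ento::CallEvent &CE,
                 const clang::ento::mpi::MPIFunctionClassifier &FC) {
  const clang::IdentifierInfo *Id = CE.getCalleeIdentifier();
  if (FC.isNonBlockingType(Id)) {
    // e.g. MPI_Isend, MPI_Irecv, MPI_Ireduce, ... - start tracking the
    // request argument.
  } else if (FC.isWaitType(Id)) {
    // MPI_Wait or MPI_Waitall - match against the tracked requests.
  }
}
```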
Index: cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.cpp
===================================================================
--- cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.cpp
+++ cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.cpp
@@ -0,0 +1,291 @@
+//===-- MPIFunctionClassifier.cpp - classifies MPI functions ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines functionality to identify and classify MPI functions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MPIFunctionClassifier.h"
+#include "llvm/ADT/STLExtras.h"
+
+namespace clang {
+namespace ento {
+namespace mpi {
+
+void MPIFunctionClassifier::identifierInit(AnalysisManager &AM) {
+  // Initialize function identifiers.
+  initPointToPointIdentifiers(AM);
+  initCollectiveIdentifiers(AM);
+  initAdditionalIdentifiers(AM);
+}
+
+void MPIFunctionClassifier::initPointToPointIdentifiers(
+    clang::ento::AnalysisManager &AM) {
+  ASTContext &ASTCtx = AM.getASTContext();
+
+  // Copy identifiers into the correct classification containers.
+  IdentInfo_MPI_Send = &ASTCtx.Idents.get("MPI_Send");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Send);
+  MPIType.push_back(IdentInfo_MPI_Send);
+  assert(IdentInfo_MPI_Send);
+
+  IdentInfo_MPI_Isend = &ASTCtx.Idents.get("MPI_Isend");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Isend);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Isend);
+  MPIType.push_back(IdentInfo_MPI_Isend);
+  assert(IdentInfo_MPI_Isend);
+
+  IdentInfo_MPI_Ssend = &ASTCtx.Idents.get("MPI_Ssend");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Ssend);
+  MPIType.push_back(IdentInfo_MPI_Ssend);
+  assert(IdentInfo_MPI_Ssend);
+
+  IdentInfo_MPI_Issend = &ASTCtx.Idents.get("MPI_Issend");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Issend);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Issend);
+  MPIType.push_back(IdentInfo_MPI_Issend);
+  assert(IdentInfo_MPI_Issend);
+
+  IdentInfo_MPI_Bsend = &ASTCtx.Idents.get("MPI_Bsend");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Bsend);
+  MPIType.push_back(IdentInfo_MPI_Bsend);
+  assert(IdentInfo_MPI_Bsend);
+
+  IdentInfo_MPI_Ibsend = &ASTCtx.Idents.get("MPI_Ibsend");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Ibsend);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Ibsend);
+  MPIType.push_back(IdentInfo_MPI_Ibsend);
+  assert(IdentInfo_MPI_Ibsend);
+
+  IdentInfo_MPI_Rsend = &ASTCtx.Idents.get("MPI_Rsend");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Rsend);
+  MPIType.push_back(IdentInfo_MPI_Rsend);
+  assert(IdentInfo_MPI_Rsend);
+
+  IdentInfo_MPI_Irsend = &ASTCtx.Idents.get("MPI_Irsend");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Irsend);
+  MPIType.push_back(IdentInfo_MPI_Irsend);
+  assert(IdentInfo_MPI_Irsend);
+
+  IdentInfo_MPI_Recv = &ASTCtx.Idents.get("MPI_Recv");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Recv);
+  MPIType.push_back(IdentInfo_MPI_Recv);
+  assert(IdentInfo_MPI_Recv);
+
+  IdentInfo_MPI_Irecv = &ASTCtx.Idents.get("MPI_Irecv");
+  MPIPointToPointTypes.push_back(IdentInfo_MPI_Irecv);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Irecv);
+  MPIType.push_back(IdentInfo_MPI_Irecv);
+  assert(IdentInfo_MPI_Irecv);
+}
+
+void MPIFunctionClassifier::initCollectiveIdentifiers(AnalysisManager &AM) {
+  ASTContext &ASTCtx = AM.getASTContext();
+
+  // Copy identifiers into the correct classification containers.
+  IdentInfo_MPI_Scatter = &ASTCtx.Idents.get("MPI_Scatter");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Scatter);
+  MPIPointToCollTypes.push_back(IdentInfo_MPI_Scatter);
+  MPIType.push_back(IdentInfo_MPI_Scatter);
+  assert(IdentInfo_MPI_Scatter);
+
+  IdentInfo_MPI_Iscatter = &ASTCtx.Idents.get("MPI_Iscatter");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Iscatter);
+  MPIPointToCollTypes.push_back(IdentInfo_MPI_Iscatter);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Iscatter);
+  MPIType.push_back(IdentInfo_MPI_Iscatter);
+  assert(IdentInfo_MPI_Iscatter);
+
+  IdentInfo_MPI_Gather = &ASTCtx.Idents.get("MPI_Gather");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Gather);
+  MPICollToPointTypes.push_back(IdentInfo_MPI_Gather);
+  MPIType.push_back(IdentInfo_MPI_Gather);
+  assert(IdentInfo_MPI_Gather);
+
+  IdentInfo_MPI_Igather = &ASTCtx.Idents.get("MPI_Igather");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Igather);
+  MPICollToPointTypes.push_back(IdentInfo_MPI_Igather);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Igather);
+  MPIType.push_back(IdentInfo_MPI_Igather);
+  assert(IdentInfo_MPI_Igather);
+
+  IdentInfo_MPI_Allgather = &ASTCtx.Idents.get("MPI_Allgather");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Allgather);
+  MPICollToCollTypes.push_back(IdentInfo_MPI_Allgather);
+  MPIType.push_back(IdentInfo_MPI_Allgather);
+  assert(IdentInfo_MPI_Allgather);
+
+  IdentInfo_MPI_Iallgather = &ASTCtx.Idents.get("MPI_Iallgather");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Iallgather);
+  MPICollToCollTypes.push_back(IdentInfo_MPI_Iallgather);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Iallgather);
+  MPIType.push_back(IdentInfo_MPI_Iallgather);
+  assert(IdentInfo_MPI_Iallgather);
+
+  IdentInfo_MPI_Bcast = &ASTCtx.Idents.get("MPI_Bcast");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Bcast);
+  MPIPointToCollTypes.push_back(IdentInfo_MPI_Bcast);
+  MPIType.push_back(IdentInfo_MPI_Bcast);
+  assert(IdentInfo_MPI_Bcast);
+
+  IdentInfo_MPI_Ibcast = &ASTCtx.Idents.get("MPI_Ibcast");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Ibcast);
+  MPIPointToCollTypes.push_back(IdentInfo_MPI_Ibcast);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Ibcast);
+  MPIType.push_back(IdentInfo_MPI_Ibcast);
+  assert(IdentInfo_MPI_Ibcast);
+
+  IdentInfo_MPI_Reduce = &ASTCtx.Idents.get("MPI_Reduce");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Reduce);
+  MPICollToPointTypes.push_back(IdentInfo_MPI_Reduce);
+  MPIType.push_back(IdentInfo_MPI_Reduce);
+  assert(IdentInfo_MPI_Reduce);
+
+  IdentInfo_MPI_Ireduce = &ASTCtx.Idents.get("MPI_Ireduce");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Ireduce);
+  MPICollToPointTypes.push_back(IdentInfo_MPI_Ireduce);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Ireduce);
+  MPIType.push_back(IdentInfo_MPI_Ireduce);
+  assert(IdentInfo_MPI_Ireduce);
+
+  IdentInfo_MPI_Allreduce = &ASTCtx.Idents.get("MPI_Allreduce");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Allreduce);
+  MPICollToCollTypes.push_back(IdentInfo_MPI_Allreduce);
+  MPIType.push_back(IdentInfo_MPI_Allreduce);
+  assert(IdentInfo_MPI_Allreduce);
+
+  IdentInfo_MPI_Iallreduce = &ASTCtx.Idents.get("MPI_Iallreduce");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Iallreduce);
+  MPICollToCollTypes.push_back(IdentInfo_MPI_Iallreduce);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Iallreduce);
+  MPIType.push_back(IdentInfo_MPI_Iallreduce);
+  assert(IdentInfo_MPI_Iallreduce);
+
+  IdentInfo_MPI_Alltoall = &ASTCtx.Idents.get("MPI_Alltoall");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Alltoall);
+  MPICollToCollTypes.push_back(IdentInfo_MPI_Alltoall);
+  MPIType.push_back(IdentInfo_MPI_Alltoall);
+  assert(IdentInfo_MPI_Alltoall);
+
+  IdentInfo_MPI_Ialltoall = &ASTCtx.Idents.get("MPI_Ialltoall");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Ialltoall);
+  MPICollToCollTypes.push_back(IdentInfo_MPI_Ialltoall);
+  MPINonBlockingTypes.push_back(IdentInfo_MPI_Ialltoall);
+  MPIType.push_back(IdentInfo_MPI_Ialltoall);
+  assert(IdentInfo_MPI_Ialltoall);
+}
+
+void MPIFunctionClassifier::initAdditionalIdentifiers(AnalysisManager &AM) {
+  ASTContext &ASTCtx = AM.getASTContext();
+
+  IdentInfo_MPI_Comm_rank = &ASTCtx.Idents.get("MPI_Comm_rank");
+  MPIType.push_back(IdentInfo_MPI_Comm_rank);
+  assert(IdentInfo_MPI_Comm_rank);
+
+  IdentInfo_MPI_Comm_size = &ASTCtx.Idents.get("MPI_Comm_size");
+  MPIType.push_back(IdentInfo_MPI_Comm_size);
+  assert(IdentInfo_MPI_Comm_size);
+
+  IdentInfo_MPI_Wait = &ASTCtx.Idents.get("MPI_Wait");
+  MPIType.push_back(IdentInfo_MPI_Wait);
+  assert(IdentInfo_MPI_Wait);
+
+  IdentInfo_MPI_Waitall = &ASTCtx.Idents.get("MPI_Waitall");
+  MPIType.push_back(IdentInfo_MPI_Waitall);
+  assert(IdentInfo_MPI_Waitall);
+
+  IdentInfo_MPI_Barrier = &ASTCtx.Idents.get("MPI_Barrier");
+  MPICollectiveTypes.push_back(IdentInfo_MPI_Barrier);
+  MPIType.push_back(IdentInfo_MPI_Barrier);
+  assert(IdentInfo_MPI_Barrier);
+}
+
+// general identifiers
+bool MPIFunctionClassifier::isMPIType(const IdentifierInfo *IdentInfo) const {
+  return llvm::is_contained(MPIType, IdentInfo);
+}
+
+bool MPIFunctionClassifier::isNonBlockingType(
+    const IdentifierInfo *IdentInfo) const {
+  return llvm::is_contained(MPINonBlockingTypes, IdentInfo);
+}
+
+// point-to-point identifiers
+bool MPIFunctionClassifier::isPointToPointType(
+    const IdentifierInfo *IdentInfo) const {
+  return llvm::is_contained(MPIPointToPointTypes, IdentInfo);
+}
+
+// collective identifiers
+bool MPIFunctionClassifier::isCollectiveType(
+    const IdentifierInfo *IdentInfo) const {
+  return llvm::is_contained(MPICollectiveTypes, IdentInfo);
+}
+
+bool MPIFunctionClassifier::isCollToColl(
+    const IdentifierInfo *IdentInfo) const {
+  return llvm::is_contained(MPICollToCollTypes, IdentInfo);
+}
+
+bool MPIFunctionClassifier::isScatterType(
+    const IdentifierInfo *IdentInfo) const {
+  return IdentInfo == IdentInfo_MPI_Scatter ||
+         IdentInfo == IdentInfo_MPI_Iscatter;
+}
+
+bool MPIFunctionClassifier::isGatherType(
+    const IdentifierInfo *IdentInfo) const {
+  return IdentInfo == IdentInfo_MPI_Gather ||
+         IdentInfo == IdentInfo_MPI_Igather ||
+         IdentInfo == IdentInfo_MPI_Allgather ||
+         IdentInfo == IdentInfo_MPI_Iallgather;
+}
+
+bool MPIFunctionClassifier::isAllgatherType(
+    const IdentifierInfo *IdentInfo) const {
+  return IdentInfo == IdentInfo_MPI_Allgather ||
+         IdentInfo == IdentInfo_MPI_Iallgather;
+}
+
+bool MPIFunctionClassifier::isAlltoallType(
+    const IdentifierInfo *IdentInfo) const {
+  return IdentInfo == IdentInfo_MPI_Alltoall ||
+         IdentInfo == IdentInfo_MPI_Ialltoall;
+}
+
+bool MPIFunctionClassifier::isBcastType(const IdentifierInfo *IdentInfo) const {
+  return IdentInfo == IdentInfo_MPI_Bcast || IdentInfo == IdentInfo_MPI_Ibcast;
+}
+
+bool MPIFunctionClassifier::isReduceType(
+    const IdentifierInfo *IdentInfo) const {
+  return IdentInfo == IdentInfo_MPI_Reduce ||
+         IdentInfo == IdentInfo_MPI_Ireduce ||
+         IdentInfo == IdentInfo_MPI_Allreduce ||
+         IdentInfo == IdentInfo_MPI_Iallreduce;
+}
+
+// additional identifiers
+bool MPIFunctionClassifier::isMPI_Wait(const IdentifierInfo *IdentInfo) const {
+  return IdentInfo == IdentInfo_MPI_Wait;
+}
+
+bool MPIFunctionClassifier::isMPI_Waitall(
+    const IdentifierInfo *IdentInfo) const {
+  return IdentInfo == IdentInfo_MPI_Waitall;
+}
+
+bool MPIFunctionClassifier::isWaitType(const IdentifierInfo *IdentInfo) const {
+  return IdentInfo == IdentInfo_MPI_Wait || IdentInfo == IdentInfo_MPI_Waitall;
+}
+
+} // end of namespace: mpi
+} // end of namespace: ento
+} // end of namespace: clang

Index: cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPITypes.h
===================================================================
--- cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPITypes.h
+++ cfe/trunk/lib/StaticAnalyzer/Checkers/MPI-Checker/MPITypes.h
@@ -0,0 +1,66 @@
+//===-- MPITypes.h - Functionality to model MPI concepts --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file provides definitions to model concepts of MPI. The mpi::Request
+/// class defines a wrapper class, in order to make MPI requests trackable for
+/// path-sensitive analysis.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPITYPES_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPITYPES_H
+
+#include "MPIFunctionClassifier.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "llvm/ADT/SmallSet.h"
+
+namespace clang {
+namespace ento {
+namespace mpi {
+
+class Request {
+public:
+  enum State : unsigned char { Nonblocking, Wait };
+
+  Request(State S) : CurrentState{S} {}
+
+  void Profile(llvm::FoldingSetNodeID &Id) const {
+    Id.AddInteger(CurrentState);
+  }
+
+  bool operator==(const Request &ToCompare) const {
+    return CurrentState == ToCompare.CurrentState;
+  }
+
+  const State CurrentState;
+};
+
+} // end of namespace: mpi
+} // end of namespace: ento
+} // end of namespace: clang
+
+// The RequestMap stores MPI requests which are identified by their memory
+// region. Requests are used in MPI to complete nonblocking operations with
+// wait operations. A custom map implementation is used, in order to make it
+// available in an arbitrary number of translation units.
+struct RequestMap {};
+typedef llvm::ImmutableMap<const clang::ento::MemRegion *,
+                           clang::ento::mpi::Request>
+    RequestMapImpl;
+
+template <>
+struct clang::ento::ProgramStateTrait<RequestMap>
+    : public clang::ento::ProgramStatePartialTrait<RequestMapImpl> {
+  static void *GDMIndex() {
+    static int index = 0;
+    return &index;
+  }
+};
+
+#endif
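Typical `RequestMap` operations against the `ProgramState`, as performed by MPIChecker.cpp above; a minimal sketch assuming MPITypes.h and the analyzer headers are included, with `trackNonblocking` as a hypothetical name:

```cpp
// The request's memory region is the key; its last observed state the value.
void trackNonblocking(clang::ento::ProgramStateRef &State,
                      const clang::ento::MemRegion *MR) {
  using clang::ento::mpi::Request;

  // Look up the request; a null result means it is not tracked yet.
  const Request *Req = State->get<RequestMap>(MR);
  if (Req && Req->CurrentState == Request::State::Nonblocking) {
    // A second nonblocking call on the same request: report it.
  }
  // Record the new state; the underlying ImmutableMap yields a fresh state.
  State = State->set<RequestMap>(MR, Request::State::Nonblocking);
}
```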
Index: cfe/trunk/lib/StaticAnalyzer/Core/MemRegion.cpp
===================================================================
--- cfe/trunk/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ cfe/trunk/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -632,6 +632,65 @@
   superRegion->printPrettyAsExpr(os);
 }
 
+std::string MemRegion::getDescriptiveName(bool UseQuotes) const {
+  std::string VariableName;
+  std::string ArrayIndices;
+  const MemRegion *R = this;
+  SmallString<50> buf;
+  llvm::raw_svector_ostream os(buf);
+
+  // Obtain array indices to add them to the variable name.
+  const ElementRegion *ER = nullptr;
+  while ((ER = R->getAs<ElementRegion>())) {
+    // Index is a ConcreteInt.
+    if (auto CI = ER->getIndex().getAs<nonloc::ConcreteInt>()) {
+      llvm::SmallString<2> Idx;
+      CI->getValue().toString(Idx);
+      ArrayIndices = (llvm::Twine("[") + Idx.str() + "]" + ArrayIndices).str();
+    }
+    // If not a ConcreteInt, try to obtain the variable
+    // name by calling 'getDescriptiveName' recursively.
+    else {
+      std::string Idx = ER->getDescriptiveName(false);
+      if (!Idx.empty()) {
+        ArrayIndices = (llvm::Twine("[") + Idx + "]" + ArrayIndices).str();
+      }
+    }
+    R = ER->getSuperRegion();
+  }
+
+  // Get variable name.
+  if (R && R->canPrintPrettyAsExpr()) {
+    R->printPrettyAsExpr(os);
+    if (UseQuotes) {
+      return (llvm::Twine("'") + os.str() + ArrayIndices + "'").str();
+    } else {
+      return (llvm::Twine(os.str()) + ArrayIndices).str();
+    }
+  }
+
+  return VariableName;
+}
+
+SourceRange MemRegion::sourceRange() const {
+  const VarRegion *const VR = dyn_cast<VarRegion>(this->getBaseRegion());
+  const FieldRegion *const FR = dyn_cast<FieldRegion>(this);
+
+  // Check for more specific regions first.
+  // FieldRegion
+  if (FR) {
+    return FR->getDecl()->getSourceRange();
+  }
+  // VarRegion
+  else if (VR) {
+    return VR->getDecl()->getSourceRange();
+  }
+  // Return invalid source range (can be checked by client).
+  else {
+    return SourceRange{};
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // MemRegionManager methods.
 //===----------------------------------------------------------------------===//

Index: cfe/trunk/test/Analysis/MPIMock.h
===================================================================
--- cfe/trunk/test/Analysis/MPIMock.h
+++ cfe/trunk/test/Analysis/MPIMock.h
@@ -0,0 +1,55 @@
+// Message Passing Interface mock header. Mocks MPI constants and functions,
+// in order to make them available in distinct integration test files.
+
+#define NULL 0
+
+// mock types
+typedef int MPI_Datatype;
+typedef int MPI_Comm;
+typedef int MPI_Request;
+typedef int MPI_Status;
+typedef int MPI_Op;
+typedef int int8_t;
+typedef int uint8_t;
+typedef int uint16_t;
+typedef int int64_t;
+namespace std { template <typename T> struct complex { T real; T imag; }; }
+
+// mock constants
+#define MPI_DATATYPE_NULL 0
+#define MPI_CHAR 0
+#define MPI_BYTE 0
+#define MPI_INT 0
+#define MPI_LONG 0
+#define MPI_LONG_DOUBLE 0
+#define MPI_UNSIGNED 0
+#define MPI_INT8_T 0
+#define MPI_UINT8_T 0
+#define MPI_UINT16_T 0
+#define MPI_C_LONG_DOUBLE_COMPLEX 0
+#define MPI_FLOAT 0
+#define MPI_DOUBLE 0
+#define MPI_CXX_BOOL 0
+#define MPI_CXX_FLOAT_COMPLEX 0
+#define MPI_CXX_DOUBLE_COMPLEX 0
+#define MPI_CXX_LONG_DOUBLE_COMPLEX 0
+#define MPI_IN_PLACE 0
+#define MPI_COMM_WORLD 0
+#define MPI_STATUS_IGNORE 0
+#define MPI_STATUSES_IGNORE 0
+#define MPI_SUM 0
+
+// mock functions
+int MPI_Comm_size(MPI_Comm, int *);
+int MPI_Comm_rank(MPI_Comm, int *);
+int MPI_Send(const void *, int, MPI_Datatype, int, int, MPI_Comm);
+int MPI_Recv(void *, int, MPI_Datatype, int, int, MPI_Comm, MPI_Status *);
+int MPI_Isend(const void *, int, MPI_Datatype, int, int, MPI_Comm,
+              MPI_Request *);
+int MPI_Irecv(void *, int, MPI_Datatype, int, int, MPI_Comm, MPI_Request *);
+int MPI_Wait(MPI_Request *, MPI_Status *);
+int MPI_Waitall(int, MPI_Request[], MPI_Status[]);
+int MPI_Reduce(const void *, void *, int, MPI_Datatype, MPI_Op, int, MPI_Comm);
+int MPI_Ireduce(const void *, void *, int, MPI_Datatype, MPI_Op, int, MPI_Comm,
+                MPI_Request *);
+int MPI_Bcast(void *, int count, MPI_Datatype, int, MPI_Comm);
Index: cfe/trunk/test/Analysis/MemRegion.cpp
===================================================================
--- cfe/trunk/test/Analysis/MemRegion.cpp
+++ cfe/trunk/test/Analysis/MemRegion.cpp
@@ -0,0 +1,47 @@
+// RUN: %clang_cc1 -analyze -analyzer-checker=optin.mpi.MPI-Checker -verify %s
+
+#include "MPIMock.h"
+
+// Use MPI-Checker to test 'getDescriptiveName', as the checker uses the
+// function for diagnostics.
+void testGetDescriptiveName() {
+  int rank = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  MPI_Request sendReq1;
+  MPI_Wait(&sendReq1, MPI_STATUS_IGNORE); // expected-warning{{Request 'sendReq1' has no matching nonblocking call.}}
+}
+
+void testGetDescriptiveName2() {
+  int rank = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  MPI_Request sendReq1[10][10][10];
+  MPI_Wait(&sendReq1[1][7][9], MPI_STATUS_IGNORE); // expected-warning{{Request 'sendReq1[1][7][9]' has no matching nonblocking call.}}
+}
+
+void testGetDescriptiveName3() {
+  int rank = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  typedef struct { MPI_Request req; } ReqStruct;
+  ReqStruct rs;
+  MPI_Request *r = &rs.req;
+  MPI_Wait(r, MPI_STATUS_IGNORE); // expected-warning{{Request 'rs.req' has no matching nonblocking call.}}
+}
+
+void testGetDescriptiveName4() {
+  int rank = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  typedef struct { MPI_Request req[2][2]; } ReqStruct;
+  ReqStruct rs;
+  MPI_Request *r = &rs.req[0][1];
+  MPI_Wait(r, MPI_STATUS_IGNORE); // expected-warning{{Request 'rs.req[0][1]' has no matching nonblocking call.}}
+}
+
+void testGetDescriptiveName5() {
+  int rank = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  typedef struct { MPI_Request req; } ReqStructInner;
+  typedef struct { ReqStructInner req; } ReqStruct;
+  ReqStruct rs;
+  MPI_Request *r = &rs.req.req;
+  MPI_Wait(r, MPI_STATUS_IGNORE); // expected-warning{{Request 'rs.req.req' has no matching nonblocking call.}}
+}

Index: cfe/trunk/test/Analysis/mpichecker.cpp
===================================================================
--- cfe/trunk/test/Analysis/mpichecker.cpp
+++ cfe/trunk/test/Analysis/mpichecker.cpp
@@ -0,0 +1,342 @@
+// RUN: %clang_cc1 -analyze -analyzer-checker=optin.mpi.MPI-Checker -verify %s
+
+#include "MPIMock.h"
+
+void matchedWait1() {
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  if (rank >= 0) {
+    MPI_Request sendReq1, recvReq1;
+    MPI_Isend(&buf, 1, MPI_DOUBLE, rank + 1, 0, MPI_COMM_WORLD, &sendReq1);
+    MPI_Irecv(&buf, 1, MPI_DOUBLE, rank - 1, 0, MPI_COMM_WORLD, &recvReq1);
+
+    MPI_Wait(&sendReq1, MPI_STATUS_IGNORE);
+    MPI_Wait(&recvReq1, MPI_STATUS_IGNORE);
+  }
+} // no error
+
+void matchedWait2() {
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  if (rank >= 0) {
+    MPI_Request sendReq1, recvReq1;
+    MPI_Isend(&buf, 1, MPI_DOUBLE, rank + 1, 0, MPI_COMM_WORLD, &sendReq1);
+    MPI_Irecv(&buf, 1, MPI_DOUBLE, rank - 1, 0, MPI_COMM_WORLD, &recvReq1);
+    MPI_Wait(&sendReq1, MPI_STATUS_IGNORE);
+    MPI_Wait(&recvReq1, MPI_STATUS_IGNORE);
+  }
+} // no error
+
+void matchedWait3() {
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  if (rank >= 0) {
+    MPI_Request sendReq1, recvReq1;
+    MPI_Isend(&buf, 1, MPI_DOUBLE, rank + 1, 0, MPI_COMM_WORLD, &sendReq1);
+    MPI_Irecv(&buf, 1, MPI_DOUBLE, rank - 1, 0, MPI_COMM_WORLD, &recvReq1);
+
+    if (rank > 1000) {
+      MPI_Wait(&sendReq1, MPI_STATUS_IGNORE);
+      MPI_Wait(&recvReq1, MPI_STATUS_IGNORE);
+    } else {
+      MPI_Wait(&sendReq1, MPI_STATUS_IGNORE);
+      MPI_Wait(&recvReq1, MPI_STATUS_IGNORE);
+    }
+  }
+} // no error
+
+void missingWait1() { // Check missing wait for dead region.
+  double buf = 0;
+  MPI_Request sendReq1;
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD, &sendReq1);
+} // expected-warning{{Request 'sendReq1' has no matching wait.}}
+
+void missingWait2() {
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  if (rank == 0) {
+  } else {
+    MPI_Request sendReq1, recvReq1;
+
+    MPI_Isend(&buf, 1, MPI_DOUBLE, rank + 1, 0, MPI_COMM_WORLD, &sendReq1);
+    MPI_Irecv(&buf, 1, MPI_DOUBLE, rank - 1, 0, MPI_COMM_WORLD, &recvReq1); // expected-warning{{Request 'sendReq1' has no matching wait.}}
+    MPI_Wait(&recvReq1, MPI_STATUS_IGNORE);
+  }
+}
+
+void doubleNonblocking() {
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  if (rank == 1) {
+  } else {
+    MPI_Request sendReq1;
+
+    MPI_Isend(&buf, 1, MPI_DOUBLE, rank + 1, 0, MPI_COMM_WORLD, &sendReq1);
+    MPI_Irecv(&buf, 1, MPI_DOUBLE, rank - 1, 0, MPI_COMM_WORLD, &sendReq1); // expected-warning{{Double nonblocking on request 'sendReq1'.}}
+    MPI_Wait(&sendReq1, MPI_STATUS_IGNORE);
+  }
+}
+
+void doubleNonblocking2() {
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  MPI_Request req;
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD, &req);
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD, &req); // expected-warning{{Double nonblocking on request 'req'.}}
+  MPI_Wait(&req, MPI_STATUS_IGNORE);
+}
+
+void doubleNonblocking3() {
+  typedef struct { MPI_Request req; } ReqStruct;
+
+  ReqStruct rs;
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD, &rs.req);
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD, &rs.req); // expected-warning{{Double nonblocking on request 'rs.req'.}}
+  MPI_Wait(&rs.req, MPI_STATUS_IGNORE);
+}
+
+void doubleNonblocking4() {
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  MPI_Request req;
+  for (int i = 0; i < 2; ++i) {
+    MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD, &req); // expected-warning{{Double nonblocking on request 'req'.}}
+  }
+  MPI_Wait(&req, MPI_STATUS_IGNORE);
+}
+
+void tripleNonblocking() {
+  double buf = 0;
+  MPI_Request sendReq;
+  MPI_Isend(&buf, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &sendReq);
+  MPI_Irecv(&buf, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &sendReq); // expected-warning{{Double nonblocking on request 'sendReq'.}}
+  MPI_Isend(&buf, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &sendReq); // expected-warning{{Double nonblocking on request 'sendReq'.}}
+  MPI_Wait(&sendReq, MPI_STATUS_IGNORE);
+}
+
+void missingNonBlocking() {
+  int rank = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  MPI_Request sendReq1[10][10][10];
+  MPI_Wait(&sendReq1[1][7][9], MPI_STATUS_IGNORE); // expected-warning{{Request 'sendReq1[1][7][9]' has no matching nonblocking call.}}
+}
+
+void missingNonBlocking2() {
+  int rank = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  typedef struct { MPI_Request req[2][2]; } ReqStruct;
+  ReqStruct rs;
+  MPI_Request *r = &rs.req[0][1];
+  MPI_Wait(r, MPI_STATUS_IGNORE); // expected-warning{{Request 'rs.req[0][1]' has no matching nonblocking call.}}
+}
+
+void missingNonBlocking3() {
+  int rank = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  MPI_Request sendReq;
+  MPI_Wait(&sendReq, MPI_STATUS_IGNORE); // expected-warning{{Request 'sendReq' has no matching nonblocking call.}}
+}
+
+void missingNonBlockingMultiple() {
+  int rank = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  MPI_Request sendReq[4];
+  for (int i = 0; i < 4; ++i) {
+    MPI_Wait(&sendReq[i], MPI_STATUS_IGNORE); // expected-warning-re 1+{{Request {{.*}} has no matching nonblocking call.}}
+  }
+}
+
+void missingNonBlockingWaitall() {
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  MPI_Request req[4];
+
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &req[0]);
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &req[1]);
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &req[3]);
+
+  MPI_Waitall(4, req, MPI_STATUSES_IGNORE); // expected-warning{{Request 'req[2]' has no matching nonblocking call.}}
+}
+
+void missingNonBlockingWaitall2() {
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  MPI_Request req[4];
+
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &req[0]);
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &req[3]);
+
+  MPI_Waitall(4, req, MPI_STATUSES_IGNORE); // expected-warning-re 2{{Request '{{(.*)[[1-2]](.*)}}' has no matching nonblocking call.}}
+}
+
+void missingNonBlockingWaitall3() {
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  MPI_Request req[4];
+
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &req[0]);
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &req[2]);
+
+  MPI_Waitall(4, req, MPI_STATUSES_IGNORE); // expected-warning-re 2{{Request '{{(.*)[[1,3]](.*)}}' has no matching nonblocking call.}}
+}
+
+void missingNonBlockingWaitall4() {
+  int rank = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  MPI_Request req[4];
+  MPI_Waitall(4, req, MPI_STATUSES_IGNORE); // expected-warning-re 4{{Request '{{(.*)[[0-3]](.*)}}' has no matching nonblocking call.}}
+}
+
+void noDoubleRequestUsage() {
+  typedef struct {
+    MPI_Request req;
+    MPI_Request req2;
+  } ReqStruct;
+
+  ReqStruct rs;
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &rs.req);
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &rs.req2);
+  MPI_Wait(&rs.req, MPI_STATUS_IGNORE);
+  MPI_Wait(&rs.req2, MPI_STATUS_IGNORE);
+} // no error
+
+void noDoubleRequestUsage2() {
+  typedef struct {
+    MPI_Request req[2];
+    MPI_Request req2;
+  } ReqStruct;
+
+  ReqStruct rs;
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &rs.req[0]);
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &rs.req[1]);
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &rs.req2);
+  MPI_Wait(&rs.req[0], MPI_STATUS_IGNORE);
+  MPI_Wait(&rs.req[1], MPI_STATUS_IGNORE);
+  MPI_Wait(&rs.req2, MPI_STATUS_IGNORE);
+} // no error
+
+void nestedRequest() {
+  typedef struct {
+    MPI_Request req[2];
+    MPI_Request req2;
+  } ReqStruct;
+
+  ReqStruct rs;
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &rs.req[0]);
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &rs.req[1]);
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &rs.req2);
+  MPI_Waitall(2, rs.req, MPI_STATUSES_IGNORE);
+  MPI_Wait(&rs.req2, MPI_STATUS_IGNORE);
+} // no error
+
+void singleRequestInWaitall() {
+  MPI_Request r;
+  int rank = 0;
+  double buf = 0;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &r);
+  MPI_Waitall(1, &r, MPI_STATUSES_IGNORE);
+} // no error
+
+void multiRequestUsage() {
+  double buf = 0;
+  MPI_Request req;
+
+  MPI_Isend(&buf, 1, MPI_DOUBLE, 1, 0, MPI_COMM_WORLD, &req);
+  MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+  MPI_Irecv(&buf, 1, MPI_DOUBLE, 1, 0, MPI_COMM_WORLD, &req);
+  MPI_Wait(&req, MPI_STATUS_IGNORE);
+} // no error
+
+void multiRequestUsage2() {
+  double buf = 0;
+  MPI_Request req;
+
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &req);
+  MPI_Wait(&req, MPI_STATUS_IGNORE);
+
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &req);
+  MPI_Wait(&req, MPI_STATUS_IGNORE);
+} // no error
+
+// wrapper function
+void callNonblocking(MPI_Request *req) {
+  double buf = 0;
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              req);
+}
+
+// wrapper function
+void callWait(MPI_Request *req) {
+  MPI_Wait(req, MPI_STATUS_IGNORE);
+}
+
+// Call nonblocking, wait wrapper functions.
+void callWrapperFunctions() {
+  MPI_Request req;
+  callNonblocking(&req);
+  callWait(&req);
+} // no error
+
+void externFunctions1() {
+  double buf = 0;
+  MPI_Request req;
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD,
+              &req);
+  void callWaitExtern(MPI_Request *req);
+  callWaitExtern(&req);
+} // expected-warning{{Request 'req' has no matching wait.}}
+
+void externFunctions2() {
+  MPI_Request req;
+  void callNonblockingExtern(MPI_Request *req);
+  callNonblockingExtern(&req);
+}

Index: cfe/trunk/test/Analysis/mpicheckernotes.cpp
===================================================================
--- cfe/trunk/test/Analysis/mpicheckernotes.cpp
+++ cfe/trunk/test/Analysis/mpicheckernotes.cpp
@@ -0,0 +1,34 @@
+// RUN: %clang_cc1 -analyze -analyzer-checker=optin.mpi.MPI-Checker -analyzer-output=text -verify %s
+
+// MPI-Checker test file to test note diagnostics.
+
+#include "MPIMock.h"
+
+void doubleNonblocking() {
+  double buf = 0;
+  MPI_Request sendReq;
+  MPI_Isend(&buf, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &sendReq); // expected-note{{Request is previously used by nonblocking call here.}}
+  MPI_Irecv(&buf, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &sendReq); // expected-warning{{Double nonblocking on request 'sendReq'.}} expected-note{{Double nonblocking on request 'sendReq'.}}
+  MPI_Wait(&sendReq, MPI_STATUS_IGNORE);
+}
+
+void missingWait() {
+  double buf = 0;
+  MPI_Request sendReq;
+  MPI_Ireduce(MPI_IN_PLACE, &buf, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD, &sendReq); // expected-note{{Request is previously used by nonblocking call here.}}
+} // expected-warning{{Request 'sendReq' has no matching wait.}} expected-note{{Request 'sendReq' has no matching wait.}}
+
+// If more than 2 nonblocking calls use a request in sequence, they all
+// point to the first call as the 'previous' call. This is because the
+// BugReporterVisitor only checks for differences in state or existence of an
+// entity.
+void tripleNonblocking() {
+  double buf = 0;
+  MPI_Request sendReq;
+  MPI_Isend(&buf, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &sendReq); // expected-note 2{{Request is previously used by nonblocking call here.}}
+  MPI_Irecv(&buf, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &sendReq); // expected-warning{{Double nonblocking on request 'sendReq'.}} expected-note{{Double nonblocking on request 'sendReq'.}}
+
+  MPI_Isend(&buf, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &sendReq); // expected-warning{{Double nonblocking on request 'sendReq'.}} expected-note{{Double nonblocking on request 'sendReq'.}}
+
+  MPI_Wait(&sendReq, MPI_STATUS_IGNORE);
+}