Index: C:/LLVM/llvm/tools/clang/utils/analyzer/CmpRuns.py =================================================================== --- C:/LLVM/llvm/tools/clang/utils/analyzer/CmpRuns.py +++ C:/LLVM/llvm/tools/clang/utils/analyzer/CmpRuns.py @@ -28,7 +28,7 @@ import os import plistlib -import CmpRuns +import CmpRuns # ? # Information about analysis run: # path - the analysis output directory @@ -40,6 +40,7 @@ self.root = root.rstrip("/\\") self.verboseLog = verboseLog + class AnalysisDiagnostic: def __init__(self, data, report, htmlReport): self._data = data @@ -51,7 +52,7 @@ root = self._report.run.root fileName = self._report.files[self._loc['file']] if fileName.startswith(root) and len(root) > 0: - return fileName[len(root)+1:] + return fileName[len(root) + 1:] return fileName def getLine(self): @@ -67,12 +68,12 @@ return self._data['description'] def getIssueIdentifier(self) : - id = self.getFileName() + "+" - if 'issue_context' in self._data : - id += self._data['issue_context'] + "+" - if 'issue_hash_content_of_line_in_context' in self._data : - id += str(self._data['issue_hash_content_of_line_in_context']) - return id + myid = self.getFileName() + "+" + if 'issue_context' in self._data: + myid += self._data['issue_context'] + "+" + if 'issue_hash_content_of_line_in_context' in self._data: + myid += str(self._data['issue_hash_content_of_line_in_context']) + return myid def getReport(self): if self._htmlReport is None: @@ -89,42 +90,52 @@ def getRawData(self): return self._data + class multidict: def __init__(self, elts=()): self.data = {} - for key,value in elts: + for key, value in elts: self[key] = value def __getitem__(self, item): return self.data[item] + def __setitem__(self, key, value): if key in self.data: self.data[key].append(value) else: self.data[key] = [value] + def items(self): return self.data.items() + def values(self): return self.data.values() + def keys(self): return self.data.keys() + def __len__(self): return len(self.data) + def get(self, key, 
default=None): return self.data.get(key, default) + class CmpOptions: def __init__(self, verboseLog=None, rootA="", rootB=""): self.rootA = rootA self.rootB = rootB self.verboseLog = verboseLog + class AnalysisReport: def __init__(self, run, files): self.run = run self.files = files self.diagnostics = [] + class AnalysisRun: def __init__(self, info): self.path = info.path @@ -145,14 +156,14 @@ # reports. Assume that all reports were created using the same # clang version (this is always true and is more efficient). if 'clang_version' in data: - if self.clang_version == None: + if self.clang_version is None: self.clang_version = data.pop('clang_version') else: data.pop('clang_version') # Ignore/delete empty reports. if not data['files']: - if deleteEmpty == True: + if deleteEmpty: os.remove(p) return @@ -169,7 +180,7 @@ report = AnalysisReport(self, data.pop('files')) diagnostics = [AnalysisDiagnostic(d, report, h) - for d,h in zip(data.pop('diagnostics'), + for d, h in zip(data.pop('diagnostics'), htmlFiles)] assert not data @@ -180,10 +191,11 @@ # Backward compatibility API. -def loadResults(path, opts, root = "", deleteEmpty=True): +def loadResults(path, opts, root="", deleteEmpty=True): return loadResultsFromSingleRun(SingleRunInfo(path, root, opts.verboseLog), deleteEmpty) + # Load results of the analyzes from a given output folder. 
# - info is the SingleRunInfo object # - deleteEmpty specifies if the empty plist files should be deleted @@ -203,9 +215,11 @@ return run -def cmpAnalysisDiagnostic(d) : + +def cmpAnalysisDiagnostic(d): return d.getIssueIdentifier() + def compareResults(A, B): """ compareResults - Generate a relation from diagnostics in run A to @@ -224,12 +238,12 @@ neqB = [] eltsA = list(A.diagnostics) eltsB = list(B.diagnostics) - eltsA.sort(key = cmpAnalysisDiagnostic) - eltsB.sort(key = cmpAnalysisDiagnostic) + eltsA.sort(key=cmpAnalysisDiagnostic) + eltsB.sort(key=cmpAnalysisDiagnostic) while eltsA and eltsB: a = eltsA.pop() b = eltsB.pop() - if (a.getIssueIdentifier() == b.getIssueIdentifier()) : + if (a.getIssueIdentifier() == b.getIssueIdentifier()): res.append((a, b, 0)) elif a.getIssueIdentifier() > b.getIssueIdentifier(): eltsB.append(b) @@ -253,6 +267,7 @@ return res + def dumpScanBuildResultsDiff(dirA, dirB, opts, deleteEmpty=True): # Load the run results. resultsA = loadResults(dirA, opts, opts.rootA, deleteEmpty) @@ -267,7 +282,7 @@ diff = compareResults(resultsA, resultsB) foundDiffs = 0 for res in diff: - a,b,confidence = res + a, b, confidence = res if a is None: print "ADDED: %r" % b.getReadableName() foundDiffs += 1 @@ -302,6 +317,7 @@ return foundDiffs, len(resultsA.diagnostics), len(resultsB.diagnostics) + def main(): from optparse import OptionParser parser = OptionParser("usage: %prog [options] [dir A] [dir B]") @@ -320,7 +336,7 @@ if len(args) != 2: parser.error("invalid number of arguments") - dirA,dirB = args + dirA, dirB = args dumpScanBuildResultsDiff(dirA, dirB, opts) Index: C:/LLVM/llvm/tools/clang/utils/analyzer/SATestAdd.py =================================================================== --- C:/LLVM/llvm/tools/clang/utils/analyzer/SATestAdd.py +++ C:/LLVM/llvm/tools/clang/utils/analyzer/SATestAdd.py @@ -46,18 +46,19 @@ import csv import sys -def isExistingProject(PMapFile, projectID) : +def isExistingProject(PMapFile, projectID): PMapReader = 
csv.reader(PMapFile) for I in PMapReader: if projectID == I[0]: return True return False + # Add a new project for testing: build it and add to the Project Map file. # Params: # Dir is the directory where the sources are. # ID is a short string used to identify a project. -def addNewProject(ID, BuildMode) : +def addNewProject(ID, BuildMode): CurDir = os.path.abspath(os.curdir) Dir = SATestBuild.getProjectDir(ID) if not os.path.exists(Dir): @@ -75,13 +76,13 @@ print "Warning: Creating the Project Map file!!" PMapFile = open(ProjectMapPath, "w+b") try: - if (isExistingProject(PMapFile, ID)) : + if (isExistingProject(PMapFile, ID)): print >> sys.stdout, 'Warning: Project with ID \'', ID, \ '\' already exists.' print >> sys.stdout, "Reference output has been regenerated." else: PMapWriter = csv.writer(PMapFile) - PMapWriter.writerow( (ID, int(BuildMode)) ); + PMapWriter.writerow((ID, int(BuildMode))) print "The project map is updated: ", ProjectMapPath finally: PMapFile.close() Index: C:/LLVM/llvm/tools/clang/utils/analyzer/SATestBuild.py =================================================================== --- C:/LLVM/llvm/tools/clang/utils/analyzer/SATestBuild.py +++ C:/LLVM/llvm/tools/clang/utils/analyzer/SATestBuild.py @@ -3,8 +3,8 @@ """ Static Analyzer qualification infrastructure. -The goal is to test the analyzer against different projects, check for failures, -compare results, and measure performance. +The goal is to test the analyzer against different projects, +check for failures, compare results, and measure performance. Repository Directory will contain sources of the projects as well as the information on how to build them and the expected output. @@ -19,8 +19,9 @@ Note that the build tree must be inside the project dir. To test the build of the analyzer one would: - - Copy over a copy of the Repository Directory. (TODO: Prefer to ensure that - the build directory does not pollute the repository to min network traffic). 
+ - Copy over a copy of the Repository Directory. + (TODO: Prefer to ensure that the build directory does not pollute + the repository to min network traffic). - Build all projects, until error. Produce logs to report errors. - Compare results. @@ -64,7 +65,7 @@ """ # Linux, Unix and MacOS: if hasattr(os, "sysconf"): - if os.sysconf_names.has_key("SC_NPROCESSORS_ONLN"): + if "SC_NPROCESSORS_ONLN" in os.sysconf_names: # Linux & Unix: ncpus = os.sysconf("SC_NPROCESSORS_ONLN") if isinstance(ncpus, int) and ncpus > 0: @@ -72,18 +73,18 @@ else: # OSX: return int(capture(['sysctl', '-n', 'hw.ncpu'])) # Windows: - if os.environ.has_key("NUMBER_OF_PROCESSORS"): + if "NUMBER_OF_PROCESSORS" in os.environ: ncpus = int(os.environ["NUMBER_OF_PROCESSORS"]) if ncpus > 0: return ncpus return 1 # Default -def which(command, paths = None): +def which(command, paths=None): """which(command, [paths]) - Look up the given command in the paths string (or the PATH environment variable, if unspecified).""" if paths is None: - paths = os.environ.get('PATH','') + paths = os.environ.get('PATH', '') # Check for absolute match first. if os.path.exists(command): @@ -109,32 +110,38 @@ return None + # Make sure we flush the output after every print statement. class flushfile(object): def __init__(self, f): self.f = f + def write(self, x): self.f.write(x) self.f.flush() + sys.stdout = flushfile(sys.stdout) + def getProjectMapPath(): ProjectMapPath = os.path.join(os.path.abspath(os.curdir), ProjectMapFile) if not os.path.exists(ProjectMapPath): print "Error: Cannot find the Project Map file " + ProjectMapPath +\ - "\nRunning script for the wrong directory?" + "\nRunning script for the wrong directory?" 
sys.exit(-1) return ProjectMapPath + def getProjectDir(ID): return os.path.join(os.path.abspath(os.curdir), ID) -def getSBOutputDirName(IsReferenceBuild) : - if IsReferenceBuild == True : + +def getSBOutputDirName(IsReferenceBuild): + if IsReferenceBuild: return SBOutputDirReferencePrefix + SBOutputDirName - else : + else: return SBOutputDirName #------------------------------------------------------------------------------ @@ -164,8 +171,8 @@ # The log file name. LogFolderName = "Logs" BuildLogName = "run_static_analyzer.log" -# Summary file - contains the summary of the failures. Ex: This info can be be -# displayed when buildbot detects a build failure. +# Summary file - contains the summary of the failures. +# Ex: This info can be be displayed when buildbot detects a build failure. NumOfFailuresInSummary = 10 FailuresSummaryFileName = "failures.txt" # Summary of the result diffs. @@ -175,9 +182,10 @@ SBOutputDirName = "ScanBuildResults" SBOutputDirReferencePrefix = "Ref" -# The name of the directory storing the cached project source. If this directory -# does not exist, the download script will be executed. That script should -# create the "CachedSource" directory and download the project source into it. +# The name of the directory storing the cached project source. If this +# directory does not exist, the download script will be executed. That +# script should create the "CachedSource" directory and download the project +# source into it. CachedSourceDirName = "CachedSource" # The name of the directory containing the source code that will be analyzed. @@ -193,7 +201,7 @@ # The list of checkers used during analyzes. # Currently, consists of all the non-experimental checkers, plus a few alpha # checkers we don't want to regress on. 
-Checkers="alpha.unix.SimpleStream,alpha.security.taint,cplusplus.NewDeleteLeaks,core,cplusplus,deadcode,security,unix,osx" +Checkers = "alpha.unix.SimpleStream,alpha.security.taint,cplusplus.NewDeleteLeaks,core,cplusplus,deadcode,security,unix,osx" Verbose = 1 @@ -207,29 +215,30 @@ ScriptPath = os.path.join(Dir, CleanupScript) runScript(ScriptPath, PBuildLogFile, Cwd) + # Run the script to download the project, if it exists. def runDownloadScript(Dir, PBuildLogFile): ScriptPath = os.path.join(Dir, DownloadScript) runScript(ScriptPath, PBuildLogFile, Dir) + # Run the provided script if it exists. def runScript(ScriptPath, PBuildLogFile, Cwd): if os.path.exists(ScriptPath): try: if Verbose == 1: print " Executing: %s" % (ScriptPath,) - check_call("chmod +x '%s'" % ScriptPath, cwd = Cwd, - stderr=PBuildLogFile, - stdout=PBuildLogFile, - shell=True) - check_call("'%s'" % ScriptPath, cwd = Cwd, stderr=PBuildLogFile, - stdout=PBuildLogFile, - shell=True) + check_call("chmod +x '%s'" % ScriptPath, cwd=Cwd, + stderr=PBuildLogFile, stdout=PBuildLogFile, + shell=True) + check_call("'%s'" % ScriptPath, cwd=Cwd, stderr=PBuildLogFile, + stdout=PBuildLogFile, shell=True) except: print "Error: Running %s failed. See %s for details." % (ScriptPath, - PBuildLogFile.name) + PBuildLogFile.name) sys.exit(-1) + # Download the project and apply the local patchfile if it exists. def downloadAndPatch(Dir, PBuildLogFile): CachedSourceDirPath = os.path.join(Dir, CachedSourceDirName) @@ -252,6 +261,7 @@ shutil.copytree(CachedSourceDirPath, PatchedSourceDirPath, symlinks=True) applyPatch(Dir, PBuildLogFile) + def applyPatch(Dir, PBuildLogFile): PatchfilePath = os.path.join(Dir, PatchfileName) PatchedSourceDirPath = os.path.join(Dir, PatchedSourceDirName) @@ -262,14 +272,15 @@ print " Applying patch." 
try: check_call("patch -p1 < '%s'" % (PatchfilePath), - cwd = PatchedSourceDirPath, - stderr=PBuildLogFile, - stdout=PBuildLogFile, - shell=True) + cwd=PatchedSourceDirPath, + stderr=PBuildLogFile, + stdout=PBuildLogFile, + shell=True) except: print "Error: Patch failed. See %s for details." % (PBuildLogFile.name) sys.exit(-1) + # Build the project with scan-build by reading in the commands and # prefixing them with the scan-build options. def runScanBuild(Dir, SBOutputDir, PBuildLogFile): @@ -279,13 +290,13 @@ sys.exit(-1) AllCheckers = Checkers - if os.environ.has_key('SA_ADDITIONAL_CHECKERS'): + if 'SA_ADDITIONAL_CHECKERS' in os.environ: AllCheckers = AllCheckers + ',' + os.environ['SA_ADDITIONAL_CHECKERS'] # Run scan-build from within the patched source directory. SBCwd = os.path.join(Dir, PatchedSourceDirName) - SBOptions = "--use-analyzer '%s' " % Clang + SBOptions = "--use-analyzer '%s' " % Clang SBOptions += "-plist-html -o '%s' " % SBOutputDir SBOptions += "-enable-checker " + AllCheckers + " " SBOptions += "--keep-empty " @@ -298,38 +309,40 @@ for Command in SBCommandFile: Command = Command.strip() if len(Command) == 0: - continue; + continue # If using 'make', auto imply a -jX argument # to speed up analysis. xcodebuild will # automatically use the maximum number of cores. if (Command.startswith("make ") or Command == "make") and \ - "-j" not in Command: + "-j" not in Command: Command += " -j%d" % Jobs SBCommand = SBPrefix + Command if Verbose == 1: - print " Executing: %s" % (SBCommand,) - check_call(SBCommand, cwd = SBCwd, stderr=PBuildLogFile, - stdout=PBuildLogFile, - shell=True) + print " Executing: %s" % (SBCommand, ) + check_call(SBCommand, cwd=SBCwd, stderr=PBuildLogFile, + stdout=PBuildLogFile, shell=True) except: - print "Error: scan-build failed. See ",PBuildLogFile.name,\ + print "Error: scan-build failed. See ", PBuildLogFile.name,\ " for details." 
raise + def hasNoExtension(FileName): (Root, Ext) = os.path.splitext(FileName) - if ((Ext == "")) : + if ((Ext == "")): return True return False + def isValidSingleInputFile(FileName): (Root, Ext) = os.path.splitext(FileName) - if ((Ext == ".i") | (Ext == ".ii") | - (Ext == ".c") | (Ext == ".cpp") | - (Ext == ".m") | (Ext == "")) : + if ((Ext == ".i") or (Ext == ".ii") or + (Ext == ".c") or (Ext == ".cpp") or + (Ext == ".m") or (Ext == "")): return True return False + # Get the path to the SDK for the given SDK name. Returns None if # the path cannot be determined. def getSDKPath(SDKName): @@ -339,11 +352,12 @@ Cmd = "xcrun --sdk " + SDKName + " --show-sdk-path" return check_output(Cmd, shell=True).rstrip() + # Run analysis on a set of preprocessed files. def runAnalyzePreprocessed(Dir, SBOutputDir, Mode): if os.path.exists(os.path.join(Dir, BuildScript)): print "Error: The preprocessed files project should not contain %s" % \ - BuildScript + BuildScript raise Exception() CmdPrefix = Clang + " -cc1 " @@ -355,14 +369,14 @@ CmdPrefix += "-isysroot " + SDKPath + " " CmdPrefix += "-analyze -analyzer-output=plist -w " - CmdPrefix += "-analyzer-checker=" + Checkers +" -fcxx-exceptions -fblocks " + CmdPrefix += "-analyzer-checker=" + Checkers + " -fcxx-exceptions -fblocks " - if (Mode == 2) : + if (Mode == 2): CmdPrefix += "-std=c++11 " PlistPath = os.path.join(Dir, SBOutputDir, "date") - FailPath = os.path.join(PlistPath, "failures"); - os.makedirs(FailPath); + FailPath = os.path.join(PlistPath, "failures") + os.makedirs(FailPath) for FullFileName in glob.glob(Dir + "/*"): FileName = os.path.basename(FullFileName) @@ -371,7 +385,7 @@ # Only run the analyzes on supported files. if (hasNoExtension(FileName)): continue - if (isValidSingleInputFile(FileName) == False): + if (not isValidSingleInputFile(FileName)): print "Error: Invalid single input file %s." 
% (FullFileName,) raise Exception() @@ -382,44 +396,46 @@ try: if Verbose == 1: print " Executing: %s" % (Command,) - check_call(Command, cwd = Dir, stderr=LogFile, - stdout=LogFile, - shell=True) + check_call(Command, cwd=Dir, stderr=LogFile, stdout=LogFile, + shell=True) except CalledProcessError, e: print "Error: Analyzes of %s failed. See %s for details." \ "Error code %d." % \ - (FullFileName, LogFile.name, e.returncode) + (FullFileName, LogFile.name, e.returncode) Failed = True finally: LogFile.close() # If command did not fail, erase the log file. - if Failed == False: - os.remove(LogFile.name); + if not Failed: + os.remove(LogFile.name) + def getBuildLogPath(SBOutputDir): return os.path.join(SBOutputDir, LogFolderName, BuildLogName) + def removeLogFile(SBOutputDir): BuildLogPath = getBuildLogPath(SBOutputDir) # Clean up the log file. - if (os.path.exists(BuildLogPath)) : + if (os.path.exists(BuildLogPath)): RmCommand = "rm '%s'" % BuildLogPath if Verbose == 1: print " Executing: %s" % (RmCommand,) check_call(RmCommand, shell=True) + def buildProject(Dir, SBOutputDir, ProjectBuildMode, IsReferenceBuild): TBegin = time.time() BuildLogPath = getBuildLogPath(SBOutputDir) print "Log file: %s" % (BuildLogPath,) - print "Output directory: %s" %(SBOutputDir, ) + print "Output directory: %s" % (SBOutputDir, ) removeLogFile(SBOutputDir) # Clean up scan build results. - if (os.path.exists(SBOutputDir)) : + if (os.path.exists(SBOutputDir)): RmCommand = "rm -r '%s'" % SBOutputDir if Verbose == 1: print " Executing: %s" % (RmCommand,) @@ -439,7 +455,7 @@ else: runAnalyzePreprocessed(Dir, SBOutputDir, ProjectBuildMode) - if IsReferenceBuild : + if IsReferenceBuild: runCleanupScript(Dir, PBuildLogFile) # Make the absolute paths relative in the reference results. 
@@ -452,9 +468,9 @@ PathPrefix = Dir if (ProjectBuildMode == 1): PathPrefix = os.path.join(Dir, PatchedSourceDirName) - Paths = [SourceFile[len(PathPrefix)+1:]\ - if SourceFile.startswith(PathPrefix)\ - else SourceFile for SourceFile in Data['files']] + Paths = [SourceFile[len(PathPrefix) + 1:] + if SourceFile.startswith(PathPrefix) + else SourceFile for SourceFile in Data['files']] Data['files'] = Paths plistlib.writePlist(Data, Plist) @@ -462,8 +478,9 @@ PBuildLogFile.close() print "Build complete (time: %.2f). See the log for more details: %s" % \ - ((time.time()-TBegin), BuildLogPath) + ((time.time() - TBegin), BuildLogPath) + # A plist file is created for each call to the analyzer(each source file). # We are only interested on the once that have bug reports, so delete the rest. def CleanUpEmptyPlists(SBOutputDir): @@ -476,19 +493,20 @@ os.remove(P) continue + # Given the scan-build output directory, checks if the build failed # (by searching for the failures directories). If there are failures, it # creates a summary file in the output directory. def checkBuild(SBOutputDir): # Check if there are failures. Failures = glob.glob(SBOutputDir + "/*/failures/*.stderr.txt") - TotalFailed = len(Failures); + TotalFailed = len(Failures) if TotalFailed == 0: CleanUpEmptyPlists(SBOutputDir) Plists = glob.glob(SBOutputDir + "/*/*.plist") print "Number of bug reports (non-empty plist files) produced: %d" %\ - len(Plists) + len(Plists) - return; + return # Create summary file to display when the build fails. SummaryPath = os.path.join(SBOutputDir, LogFolderName, FailuresSummaryFileName) @@ -500,19 +518,18 @@ SummaryLog.write("Total of %d failures discovered.\n" % (TotalFailed,)) if TotalFailed > NumOfFailuresInSummary: SummaryLog.write("See the first %d below.\n" - % (NumOfFailuresInSummary,)) + % (NumOfFailuresInSummary,)) # TODO: Add a line "See the results folder for more."
- FailuresCopied = NumOfFailuresInSummary Idx = 0 for FailLogPathI in Failures: if Idx >= NumOfFailuresInSummary: - break; + break Idx += 1 - SummaryLog.write("\n-- Error #%d -----------\n" % (Idx,)); - FailLogI = open(FailLogPathI, "r"); + SummaryLog.write("\n-- Error #%d -----------\n" % (Idx,)) + FailLogI = open(FailLogPathI, "r") try: - shutil.copyfileobj(FailLogI, SummaryLog); + shutil.copyfileobj(FailLogI, SummaryLog) finally: FailLogI.close() finally: @@ -521,17 +538,19 @@ print "Error: analysis failed. See ", SummaryPath sys.exit(-1) + # Auxiliary object to discard stdout. class Discarder(object): def write(self, text): - pass # do nothing + pass # do nothing + # Compare the warnings produced by scan-build. # Strictness defines the success criteria for the test: # 0 - success if there are no crashes or analyzer failure. # 1 - success if there are no difference in the number of reported bugs. # 2 - success if all the bug reports are identical. -def runCmpResults(Dir, Strictness = 0): +def runCmpResults(Dir, Strictness=0): TBegin = time.time() RefDir = os.path.join(Dir, SBOutputDirReferencePrefix + SBOutputDirName) @@ -572,14 +591,15 @@ DiffsPath = os.path.join(NewDir, DiffsSummaryFileName) PatchedSourceDirPath = os.path.join(Dir, PatchedSourceDirName) Opts = CmpRuns.CmpOptions(DiffsPath, "", PatchedSourceDirPath) - # Discard everything coming out of stdout (CmpRun produces a lot of them). + # Discard everything coming out of stdout + # (CmpRun produces a lot of them). OLD_STDOUT = sys.stdout sys.stdout = Discarder() # Scan the results, delete empty plist files. NumDiffs, ReportsInRef, ReportsInNew = \ CmpRuns.dumpScanBuildResultsDiff(RefDir, NewDir, Opts, False) sys.stdout = OLD_STDOUT - if (NumDiffs > 0) : + if (NumDiffs > 0): print "Warning: %r differences in diagnostics. See %s" % \ (NumDiffs, DiffsPath,) if Strictness >= 2 and NumDiffs > 0: @@ -589,9 +609,10 @@ print "Error: The number of results are different in strict mode (1)." 
sys.exit(-1) - print "Diagnostic comparison complete (time: %.2f)." % (time.time()-TBegin) + print "Diagnostic comparison complete (time: %.2f)." % (time.time() - TBegin) return (NumDiffs > 0) + def cleanupReferenceResults(SBOutputDir): # Delete html, css, and js files from reference results. These can # include multiple copies of the benchmark source and so get very large. @@ -605,6 +626,7 @@ # Remove the log file. It leaks absolute path names. removeLogFile(SBOutputDir) + def updateSVN(Mode, ProjectsMap): try: ProjectsMap.seek(0) @@ -634,12 +656,14 @@ print "Error: SVN update failed." sys.exit(-1) -def testProject(ID, ProjectBuildMode, IsReferenceBuild=False, Dir=None, Strictness = 0): + +def testProject(ID, ProjectBuildMode, IsReferenceBuild=False, Dir=None, + Strictness=0): print " \n\n--- Building project %s" % (ID,) TBegin = time.time() - if Dir is None : + if Dir is None: Dir = getProjectDir(ID) if Verbose == 1: print " Build directory: %s." % (Dir,) @@ -652,20 +676,21 @@ checkBuild(SBOutputDir) - if IsReferenceBuild == False: + if not IsReferenceBuild: runCmpResults(Dir, Strictness) else: cleanupReferenceResults(SBOutputDir) print "Completed tests for project %s (time: %.2f)." % \ - (ID, (time.time()-TBegin)) + (ID, (time.time() - TBegin)) -def testAll(IsReferenceBuild = False, UpdateSVN = False, Strictness = 0): + +def testAll(IsReferenceBuild=False, UpdateSVN=False, Strictness=0): PMapFile = open(getProjectMapPath(), "rb") try: # Validate the input. for I in csv.reader(PMapFile): - if (len(I) != 2) : + if (len(I) != 2): print "Error: Rows in the ProjectMapFile should have 3 entries." raise Exception() if (not ((I[1] == "0") | (I[1] == "1") | (I[1] == "2"))): @@ -675,9 +700,9 @@ # When we are regenerating the reference results, we might need to # update svn. Remove reference results from SVN. 
- if UpdateSVN == True: - assert(IsReferenceBuild == True); - updateSVN("delete", PMapFile); + if UpdateSVN: + assert(IsReferenceBuild) + updateSVN("delete", PMapFile) # Test the projects. PMapFile.seek(0) @@ -685,8 +710,8 @@ testProject(I[0], int(I[1]), IsReferenceBuild, None, Strictness) # Add reference results to SVN. - if UpdateSVN == True: - updateSVN("add", PMapFile); + if UpdateSVN: + updateSVN("add", PMapFile) except: print "Error occurred. Premature termination." @@ -698,13 +723,14 @@ # Parse command line arguments. Parser = argparse.ArgumentParser(description='Test the Clang Static Analyzer.') Parser.add_argument('--strictness', dest='strictness', type=int, default=0, - help='0 to fail on runtime errors, 1 to fail when the number\ + help='0 to fail on runtime errors, 1 to fail when the number\ of found bugs are different from the reference, 2 to \ fail on any difference from the reference. Default is 0.') - Parser.add_argument('-r', dest='regenerate', action='store_true', default=False, - help='Regenerate reference output.') + Parser.add_argument('-r', dest='regenerate', action='store_true', + default=False, help='Regenerate reference output.') Parser.add_argument('-rs', dest='update_reference', action='store_true', - default=False, help='Regenerate reference output and update svn.') + default=False, + help='Regenerate reference output and update svn.') Args = Parser.parse_args() IsReference = False Index: C:/LLVM/llvm/tools/clang/utils/analyzer/SumTimerInfo.py =================================================================== --- C:/LLVM/llvm/tools/clang/utils/analyzer/SumTimerInfo.py +++ C:/LLVM/llvm/tools/clang/utils/analyzer/SumTimerInfo.py @@ -33,40 +33,41 @@ MaxCFGSize = 0 Mode = 1 for line in f: - if ("Miscellaneous Ungrouped Timers" in line) : + if ("Miscellaneous Ungrouped Timers" in line): Mode = 1 - if (("Analyzer Total Time" in line) and (Mode == 1)) : + if (("Analyzer Total Time" in line) and (Mode == 1)): s = line.split() Time = Time + 
float(s[6]) Count = Count + 1 - if (float(s[6]) > MaxTime) : + if (float(s[6]) > MaxTime): MaxTime = float(s[6]) - if ((("warning generated." in line) or ("warnings generated" in line)) and Mode == 1) : + if ((("warning generated." in line) or ("warnings generated" in line)) and Mode == 1): s = line.split() Warnings = Warnings + int(s[0]) - if (("The # of functions analysed (as top level)" in line) and (Mode == 1)) : + if (("The # of functions analysed (as top level)" in line) and (Mode == 1)): s = line.split() FunctionsAnalyzed = FunctionsAnalyzed + int(s[0]) - if (("The % of reachable basic blocks" in line) and (Mode == 1)) : + if (("The % of reachable basic blocks" in line) and (Mode == 1)): s = line.split() ReachableBlocks = ReachableBlocks + int(s[0]) - if (("The # of times we reached the max number of steps" in line) and (Mode == 1)) : + if (("The # of times we reached the max number of steps" in line) and (Mode == 1)): s = line.split() ReachedMaxSteps = ReachedMaxSteps + int(s[0]) - if (("The maximum number of basic blocks in a function" in line) and (Mode == 1)) : + if (("The maximum number of basic blocks in a function" in line) and (Mode == 1)): s = line.split() - if (MaxCFGSize < int(s[0])) : + if (MaxCFGSize < int(s[0])): MaxCFGSize = int(s[0]) - if (("The # of steps executed" in line) and (Mode == 1)) : + if (("The # of steps executed" in line) and (Mode == 1)): s = line.split() NumSteps = NumSteps + int(s[0]) - if (("The # of times we inlined a call" in line) and (Mode == 1)) : + if (("The # of times we inlined a call" in line) and (Mode == 1)): s = line.split() NumInlinedCallSites = NumInlinedCallSites + int(s[0]) - if (("The # of times we split the path due to imprecise dynamic dispatch info" in line) and (Mode == 1)) : + if (("The # of times we split the path due to imprecise dynamic dispatch info" in line) + and (Mode == 1)): s = line.split() NumBifurcatedCallSites = NumBifurcatedCallSites + int(s[0]) - if ((") Total" in line) and (Mode == 1)) : + 
if ((") Total" in line) and (Mode == 1)): s = line.split() TotalTime = TotalTime + float(s[6])