Index: CMakeLists.txt
===================================================================
--- CMakeLists.txt
+++ CMakeLists.txt
@@ -66,6 +66,12 @@
 set(TEST_SUITE_REMOTE_HOST "" CACHE STRING "Remote execution host")
 mark_as_advanced(TEST_SUITE_REMOTE_CLIENT)
 
+add_custom_target(rsync
+  COMMAND ${PROJECT_SOURCE_DIR}/utils/rsync.sh
+          ${TEST_SUITE_REMOTE_HOST} ${PROJECT_BINARY_DIR}
+  USES_TERMINAL
+)
+
 # Run Under configuration for RunSafely.sh (will be set in lit.site.cfg)
 set(TEST_SUITE_RUN_UNDER "" CACHE STRING "RunSafely.sh run-under (-u) parameter")
 
Index: cmake/modules/TestSuite.cmake
===================================================================
--- cmake/modules/TestSuite.cmake
+++ cmake/modules/TestSuite.cmake
@@ -6,6 +6,14 @@
 include(TestFile)
 include(CopyDir)
 
+set(_DEFAULT_TEST_SUITE_COPY_DATA OFF)
+if(TEST_SUITE_REMOTE_HOST)
+  set(_DEFAULT_TEST_SUITE_COPY_DATA ON)
+endif()
+option(TEST_SUITE_COPY_DATA "Always copy benchmark data to builddir"
+       ${_DEFAULT_TEST_SUITE_COPY_DATA})
+mark_as_advanced(TEST_SUITE_COPY_DATA)
+
 # Copies files and directories to be used as benchmark input data to the
 # directory of the benchmark executable.
 # Paths are interepreted relative to CMAKE_CURRENT_SOURCE_DIR by default but
@@ -18,7 +26,7 @@
   endif()
   foreach(file ${_LTDARGS_UNPARSED_ARGUMENTS})
     set(full_path ${SOURCE_DIR}/${file})
-    if(_LTDARGS_MUST_COPY)
+    if(_LTDARGS_MUST_COPY OR TEST_SUITE_COPY_DATA)
       if(IS_DIRECTORY ${full_path})
         llvm_copy_dir(${target} $<TARGET_FILE_DIR:${target}>/${file} ${full_path})
       else()
@@ -87,9 +95,9 @@
 function(test_suite_add_build_dependencies target)
   add_dependencies(${target}
     build-HashProgramOutput.sh
-    build-fpcmp
     build-timeit
     build-timeit-target
+    fpcmp
   )
 endfunction()
 
Index: litsupport/modules/compiletime.py
===================================================================
--- litsupport/modules/compiletime.py
+++ litsupport/modules/compiletime.py
@@ -18,10 +18,12 @@
     for file in files:
         if file.endswith('.o.time') and file.startswith(prefix):
             fullpath = os.path.join(path, file)
-            compile_time += timeit.getUserTime(fullpath)
+            with open(fullpath) as fd:
+                compile_time += timeit.getUserTime(fd.read())
         if file.endswith('.link.time') and file.startswith(prefix):
             fullpath = os.path.join(path, file)
-            link_time += timeit.getUserTime(fullpath)
+            with open(fullpath) as fd:
+                link_time += timeit.getUserTime(fd.read())
     return {
         'compile_time': compile_time,
         'link_time': link_time,
Index: litsupport/modules/microbenchmark.py
===================================================================
--- litsupport/modules/microbenchmark.py
+++ litsupport/modules/microbenchmark.py
@@ -24,22 +24,22 @@
 
 def _collectMicrobenchmarkTime(context, microbenchfiles):
     for f in microbenchfiles:
-        with open(f) as inp:
-            lines = csv.reader(inp)
-            # First line: "name,iterations,real_time,cpu_time,time_unit..."
-            for line in lines:
-                if line[0] == 'name':
-                    continue
-                # Name for MicroBenchmark
-                name = line[0]
-                # Create Result object with PASS
-                microBenchmark = lit.Test.Result(lit.Test.PASS)
-
-                # Index 3 is cpu_time
-                microBenchmark.addMetric('exec_time', lit.Test.toMetricValue(float(line[3])))
-
-                # Add Micro Result
-                context.micro_results[name] = microBenchmark
+        content = context.read_result_file(context, f)
+        lines = csv.reader(content.splitlines())
+        # First line: "name,iterations,real_time,cpu_time,time_unit..."
+        for line in lines:
+            if line[0] == 'name':
+                continue
+            # Name for MicroBenchmark
+            name = line[0]
+            # Create Result object with PASS
+            microBenchmark = lit.Test.Result(lit.Test.PASS)
+
+            # Index 3 is cpu_time
+            microBenchmark.addMetric('exec_time', lit.Test.toMetricValue(float(line[3])))
+
+            # Add Micro Result
+            context.micro_results[name] = microBenchmark
 
     # returning the number of microbenchmarks collected as a metric for the base test
     return ({'MicroBenchmarks': lit.Test.toMetricValue(len(context.micro_results))})
Index: litsupport/modules/remote.py
===================================================================
--- litsupport/modules/remote.py
+++ litsupport/modules/remote.py
@@ -1,29 +1,43 @@
 """Test module to execute a benchmark through ssh on a remote device. This
 assumes all relevant directories and files are present on the remote device
 (typically shared by NFS)."""
+from litsupport import shellcommand
 from litsupport import testplan
 import logging
+import os
+import subprocess
 
 
-def _mutateCommandline(context, commandline, suffix=""):
-    shfilename = context.tmpBase + suffix + ".sh"
-    shfile = open(shfilename, "w")
-    shfile.write(commandline + "\n")
-    logging.info("Created shfile '%s'", shfilename)
-    shfile.close()
+def _wrap_command(context, command):
+    escaped_command = command.replace("'", "'\\''")
+    return "%s %s '%s'" % (context.config.remote_client,
+                           context.config.remote_host, escaped_command)
 
-    config = context.config
-    remote_commandline = config.remote_client
-    remote_commandline += " %s" % config.remote_host
-    remote_commandline += " /bin/sh %s" % shfilename
-    return remote_commandline
 
+def _mutateCommandline(context, commandline):
+    return _wrap_command(context, commandline)
 
-def _mutateScript(context, script, suffix=""):
-    mutate = lambda c, cmd: _mutateCommandline(c, cmd, suffix)
+
+def _mutateScript(context, script):
+    mutate = lambda c, cmd: _mutateCommandline(c, cmd)
     return testplan.mutateScript(context, script, mutate)
 
 
+def remote_read_result_file(context, path):
+    assert os.path.isabs(path)
+    command = _wrap_command(context, "cat '%s'" % path)
+    logging.info("$ %s", command)
+    return subprocess.check_output(command, shell=True)
+
+
 def mutatePlan(context, plan):
-    plan.preparescript = _mutateScript(context, plan.preparescript, "-prepare")
+    plan.preparescript = _mutateScript(context, plan.preparescript)
+    plan.preparescript.insert(0,
+        _wrap_command(context, "mkdir -p '%s'" %
+                      os.path.dirname(context.tmpBase))
+    )
     plan.runscript = _mutateScript(context, plan.runscript)
+    plan.verifyscript = _mutateScript(context, plan.verifyscript)
+
+    assert context.read_result_file is testplan.default_read_result_file
+    context.read_result_file = remote_read_result_file
Index: litsupport/modules/timeit.py
===================================================================
--- litsupport/modules/timeit.py
+++ litsupport/modules/timeit.py
@@ -5,7 +5,6 @@
 
 
 def _mutateCommandLine(context, commandline):
-    outfile = context.tmpBase + ".out"
     timefile = context.tmpBase + ".time"
     config = context.config
     cmd = shellcommand.parse(commandline)
@@ -34,6 +33,7 @@
         if cmd.stdout is not None or cmd.stderr is not None:
             raise Exception("Separate stdout/stderr redirection not " +
                             "possible with traditional output")
+        outfile = context.tmpBase + ".out"
         args += ["--append-exitstatus"]
         args += ["--redirect-output", outfile]
     stdin = cmd.stdin
@@ -64,7 +64,8 @@
 def _collectTime(context, timefiles, metric_name='exec_time'):
     time = 0.0
     for timefile in timefiles:
-        time += getUserTime(timefile)
+        filecontent = context.read_result_file(context, timefile)
+        time += getUserTime(filecontent)
     return {metric_name: time}
 
 
@@ -78,11 +79,9 @@
 )
 
 
-def getUserTime(filename):
+def getUserTime(timefile):
     """Extract the user time form a .time file produced by timeit"""
-    with open(filename) as fd:
-        l = [l for l in fd.readlines()
-             if l.startswith('user')]
+    l = [l for l in timefile.splitlines() if l.startswith('user')]
     assert len(l) == 1
     m = re.match(r'user\s+([0-9.]+)', l[0])
     return float(m.group(1))
Index: litsupport/test.py
===================================================================
--- litsupport/test.py
+++ litsupport/test.py
@@ -18,19 +18,6 @@
 NOEXE = lit.Test.ResultCode('NOEXE', True)
 
 
-class TestContext:
-    """This class is used to hold data used while constructing a testrun.
-    For example this can be used by modules modifying the commandline with
-    extra instrumentation/measurement wrappers to pass the filenames of the
-    results to a final data collection step."""
-    def __init__(self, test, litConfig, tmpDir, tmpBase):
-        self.test = test
-        self.config = test.config
-        self.litConfig = litConfig
-        self.tmpDir = tmpDir
-        self.tmpBase = tmpBase
-
-
 class TestSuiteTest(lit.formats.ShTest):
     def __init__(self):
         super(TestSuiteTest, self).__init__()
@@ -45,7 +32,8 @@
         # Parse .test file and initialize context
         tmpDir, tmpBase = lit.TestRunner.getTempPaths(test)
         lit.util.mkdir_p(os.path.dirname(tmpBase))
-        context = TestContext(test, litConfig, tmpDir, tmpBase)
+        context = litsupport.testplan.TestContext(test, litConfig, tmpDir,
+                                                  tmpBase)
 
         litsupport.testfile.parse(context, test.getSourcePath())
         plan = litsupport.testplan.TestPlan()
Index: litsupport/testplan.py
===================================================================
--- litsupport/testplan.py
+++ litsupport/testplan.py
@@ -170,3 +170,22 @@
     """Wrapper around subprocess.check_call that logs the command."""
     logging.info(" ".join(commandline))
     return subprocess.check_call(commandline, *aargs, **dargs)
+
+
+def default_read_result_file(context, path):
+    with open(path) as fd:
+        return fd.read()
+
+
+class TestContext:
+    """This class is used to hold data used while constructing a testrun.
+    For example this can be used by modules modifying the commandline with
+    extra instrumentation/measurement wrappers to pass the filenames of the
+    results to a final data collection step."""
+    def __init__(self, test, litConfig, tmpDir, tmpBase):
+        self.test = test
+        self.config = test.config
+        self.litConfig = litConfig
+        self.tmpDir = tmpDir
+        self.tmpBase = tmpBase
+        self.read_result_file = default_read_result_file
Index: tools/CMakeLists.txt
===================================================================
--- tools/CMakeLists.txt
+++ tools/CMakeLists.txt
@@ -4,7 +4,7 @@
 
 include(Host)
 
-llvm_add_host_executable(build-fpcmp fpcmp fpcmp.c)
+add_executable(fpcmp fpcmp.c)
 
 add_custom_command(
   OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/HashProgramOutput.sh
Index: utils/rsync.sh
===================================================================
--- /dev/null
+++ utils/rsync.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+# Sync a build directory to remote device for running.
+set -eu
+DEVICE="$1"
+BUILDDIR="$2"
+
+case $BUILDDIR in
+  /*) ;;
+  *)
+    echo 1>&2 "Builddir path must be absolute!"
+    exit 1
+    ;;
+esac
+
+RSYNC_FLAGS=""
+RSYNC_FLAGS+=" -arR"
+RSYNC_FLAGS+=" --delete --delete-excluded"
+# We cannot easily differentiate between intermediate build results and
+# files necessary to run the benchmark, so for now we just exclude based on
+# some file extensions...
+RSYNC_FLAGS+=" --exclude=\"*.o\""
+RSYNC_FLAGS+=" --exclude=\"*.a\""
+RSYNC_FLAGS+=" --exclude=\"*.time\""
+RSYNC_FLAGS+=" --exclude=\"*.cmake\""
+RSYNC_FLAGS+=" --exclude=Output/"
+RSYNC_FLAGS+=" --exclude=.ninja_deps"
+RSYNC_FLAGS+=" --exclude=.ninja_log"
+RSYNC_FLAGS+=" --exclude=build.ninja"
+RSYNC_FLAGS+=" --exclude=rules.ninja"
+RSYNC_FLAGS+=" --exclude=CMakeFiles/"
+#RSYNC_FLAGS+=" -v"
+
+set -x
+eval rsync $RSYNC_FLAGS $BUILDDIR $DEVICE:/
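
Example usage (not part of the patch; the device name "mydevice", the compiler path, and the source/build paths are placeholders): configuring with TEST_SUITE_REMOTE_HOST set defaults TEST_SUITE_COPY_DATA to ON so benchmark inputs are copied next to the executables, the new rsync target pushes the build directory to the device, and the remote litsupport module then runs the benchmarks and reads result files back over ssh:

  % cmake -GNinja -DCMAKE_C_COMPILER=/path/to/clang \
        -DTEST_SUITE_REMOTE_HOST=mydevice \
        /path/to/test-suite
  % ninja
  % ninja rsync        # runs utils/rsync.sh mydevice <builddir>
  % llvm-lit -j1 -o results.json .

Because rsync.sh passes -R with an absolute build directory, the files land at the same absolute path on the device, which is what the ssh-wrapped run and verify scripts expect.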