Index: CMakeLists.txt
===================================================================
--- CMakeLists.txt
+++ CMakeLists.txt
@@ -66,6 +66,12 @@
 set(TEST_SUITE_REMOTE_HOST "" CACHE STRING "Remote execution host")
 mark_as_advanced(TEST_SUITE_REMOTE_CLIENT)

+add_custom_target(rsync
+  COMMAND ${PROJECT_SOURCE_DIR}/utils/rsync.sh
+          ${TEST_SUITE_REMOTE_HOST} ${PROJECT_BINARY_DIR}
+  USES_TERMINAL
+)
+
 # Run Under configuration for RunSafely.sh (will be set in lit.site.cfg)
 set(TEST_SUITE_RUN_UNDER "" CACHE STRING "RunSafely.sh run-under (-u) parameter")
Index: cmake/modules/TestSuite.cmake
===================================================================
--- cmake/modules/TestSuite.cmake
+++ cmake/modules/TestSuite.cmake
@@ -6,6 +6,14 @@
 include(TestFile)
 include(CopyDir)

+set(_DEFAULT_TEST_SUITE_COPY_DATA OFF)
+if(TEST_SUITE_REMOTE_HOST)
+  set(_DEFAULT_TEST_SUITE_COPY_DATA ON)
+endif()
+option(TEST_SUITE_COPY_DATA "Always copy benchmark data to builddir"
+       ${_DEFAULT_TEST_SUITE_COPY_DATA})
+mark_as_advanced(TEST_SUITE_COPY_DATA)
+
 # Copies files and directories to be used as benchmark input data to the
 # directory of the benchmark executable.
 # Paths are interepreted relative to CMAKE_CURRENT_SOURCE_DIR by default but
@@ -18,7 +26,7 @@
   endif()
   foreach(file ${_LTDARGS_UNPARSED_ARGUMENTS})
     set(full_path ${SOURCE_DIR}/${file})
-    if(_LTDARGS_MUST_COPY)
+    if(_LTDARGS_MUST_COPY OR TEST_SUITE_COPY_DATA)
       if(IS_DIRECTORY ${full_path})
         llvm_copy_dir(${target} $<TARGET_FILE_DIR:${target}>/${file} ${full_path})
       else()
@@ -87,9 +95,9 @@

 function(test_suite_add_build_dependencies target)
   add_dependencies(${target}
     build-HashProgramOutput.sh
-    build-fpcmp
     build-timeit
     build-timeit-target
+    fpcmp
   )
 endfunction()
Index: litsupport/modules/microbenchmark.py
===================================================================
--- litsupport/modules/microbenchmark.py
+++ litsupport/modules/microbenchmark.py
@@ -24,22 +24,22 @@

 def _collectMicrobenchmarkTime(context, microbenchfiles):
     for f in microbenchfiles:
-        with open(f) as inp:
-            lines = csv.reader(inp)
-            # First line: "name,iterations,real_time,cpu_time,time_unit..."
-            for line in lines:
-                if line[0] == 'name':
-                    continue
-                # Name for MicroBenchmark
-                name = line[0]
-                # Create Result object with PASS
-                microBenchmark = lit.Test.Result(lit.Test.PASS)
-
-                # Index 3 is cpu_time
-                microBenchmark.addMetric('exec_time', lit.Test.toMetricValue(float(line[3])))
-
-                # Add Micro Result
-                context.micro_results[name] = microBenchmark
+        content = context.read_result_file(context, f)
+        lines = csv.reader(content.splitlines())
+        # First line: "name,iterations,real_time,cpu_time,time_unit..."
+        for line in lines:
+            if line[0] == 'name':
+                continue
+            # Name for MicroBenchmark
+            name = line[0]
+            # Create Result object with PASS
+            microBenchmark = lit.Test.Result(lit.Test.PASS)
+
+            # Index 3 is cpu_time
+            microBenchmark.addMetric('exec_time', lit.Test.toMetricValue(float(line[3])))
+
+            # Add Micro Result
+            context.micro_results[name] = microBenchmark

     # returning the number of microbenchmarks collected as a metric for the base test
     return ({'MicroBenchmarks': lit.Test.toMetricValue(len(context.micro_results))})
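The hunk above shows the pattern the rest of the patch builds on: metric collectors no longer call open() on result files but go through context.read_result_file, so the same collection code works whether the file sits on the host or has to be fetched from a device. A minimal sketch of the indirection, with illustrative names (Context and collect_first_line are not part of the patch):

    # Sketch of the read_result_file indirection; names are illustrative.
    def local_read(context, path):
        # Default behaviour: the result file is on the host filesystem.
        with open(path) as fd:
            return fd.read()

    class Context(object):
        def __init__(self):
            # A module may replace this, e.g. with an ssh-based reader.
            self.read_result_file = local_read

    def collect_first_line(context, path):
        # Collectors never open() directly, so they work unchanged
        # whether the file is local or fetched from a remote device.
        content = context.read_result_file(context, path)
        return content.splitlines()[0]

The remote module below swaps in a reader that runs cat on the device; litsupport/testplan.py provides the local default.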
Index: litsupport/modules/profilegen.py
===================================================================
--- litsupport/modules/profilegen.py
+++ litsupport/modules/profilegen.py
@@ -20,6 +20,8 @@
     context.profilefiles = []
     # Adjust run steps to set LLVM_PROFILE_FILE environment variable.
     plan.runscript = _mutateScript(context, plan.runscript)
+    plan.profile_files += context.profilefiles
+
     # Run profdata merge at the end
     profdatafile = context.executable + ".profdata"
     args = ['merge', '-output=%s' % profdatafile] + context.profilefiles
Index: litsupport/modules/remote.py
===================================================================
--- litsupport/modules/remote.py
+++ litsupport/modules/remote.py
@@ -1,29 +1,54 @@
 """Test module to execute a benchmark through ssh on a remote device.
 This assumes all relevant directories and files are present on the remote
 device (typically shared by NFS)."""
+from litsupport import shellcommand
 from litsupport import testplan
 import logging
+import os
+import subprocess


-def _mutateCommandline(context, commandline, suffix=""):
-    shfilename = context.tmpBase + suffix + ".sh"
-    shfile = open(shfilename, "w")
-    shfile.write(commandline + "\n")
-    logging.info("Created shfile '%s'", shfilename)
-    shfile.close()
+def _wrap_command(context, command):
+    escaped_command = command.replace("'", "'\\''")
+    return "%s %s '%s'" % (context.config.remote_client,
+                           context.config.remote_host, escaped_command)

-    config = context.config
-    remote_commandline = config.remote_client
-    remote_commandline += " %s" % config.remote_host
-    remote_commandline += " /bin/sh %s" % shfilename
-    return remote_commandline

+def _mutateCommandline(context, commandline):
+    return _wrap_command(context, commandline)

-def _mutateScript(context, script, suffix=""):
-    mutate = lambda c, cmd: _mutateCommandline(c, cmd, suffix)
+
+def _mutateScript(context, script):
+    def mutate(context, command):
+        return _mutateCommandline(context, command)
     return testplan.mutateScript(context, script, mutate)


+def remote_read_result_file(context, path):
+    assert os.path.isabs(path)
+    command = _wrap_command(context, "cat '%s'" % path)
+    logging.info("$ %s", command)
+    return subprocess.check_output(command, shell=True)
+
+
 def mutatePlan(context, plan):
-    plan.preparescript = _mutateScript(context, plan.preparescript, "-prepare")
+    plan.preparescript = _mutateScript(context, plan.preparescript)
+    # We need the temporary directory to exist on the remote as well.
+    command = _wrap_command(context,
+                            "mkdir -p '%s'" % os.path.dirname(context.tmpBase))
+    plan.preparescript.insert(0, command)
     plan.runscript = _mutateScript(context, plan.runscript)
+    plan.verifyscript = _mutateScript(context, plan.verifyscript)
+    for name, script in plan.metricscripts.items():
+        plan.metricscripts[name] = _mutateScript(context, script)
+
+    # Merging profile data should happen on the host because that is where
+    # the toolchain resides; we have to retrieve the profile data from the
+    # device first, so add commands for that to the profile script.
+    for path in plan.profile_files:
+        assert os.path.isabs(path)
+        command = "scp %s:%s %s" % (context.config.remote_host, path, path)
+        plan.profilescript.insert(0, command)
+
+    assert context.read_result_file is testplan.default_read_result_file
+    context.read_result_file = remote_read_result_file
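The quoting in _wrap_command is the standard POSIX single-quote idiom: close the quoted string, emit an escaped quote as '\'' and reopen it. A standalone check of what the wrapper produces (the client and host values here are made up, not defaults from the patch):

    # Demonstrates the quoting used by _wrap_command; "ssh" and
    # "mydevice" are example values only.
    def wrap_command(remote_client, remote_host, command):
        # A ' cannot appear inside a single-quoted shell string, so
        # each quote becomes: close quote, escaped quote, reopen quote.
        escaped_command = command.replace("'", "'\\''")
        return "%s %s '%s'" % (remote_client, remote_host, escaped_command)

    print(wrap_command("ssh", "mydevice", "echo 'hello world'"))
    # -> ssh mydevice 'echo '\''hello world'\'''

Compared with the old approach of writing each command to a .sh file and running it with /bin/sh on the remote, this keeps the whole command visible in the lit log and no longer depends on the script file being reachable from the device.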
Index: litsupport/modules/timeit.py
===================================================================
--- litsupport/modules/timeit.py
+++ litsupport/modules/timeit.py
@@ -5,7 +5,6 @@


 def _mutateCommandLine(context, commandline):
-    outfile = context.tmpBase + ".out"
     timefile = context.tmpBase + ".time"
     config = context.config
     cmd = shellcommand.parse(commandline)
@@ -34,6 +33,7 @@
         if cmd.stdout is not None or cmd.stderr is not None:
             raise Exception("Separate stdout/stderr redirection not " +
                             "possible with traditional output")
+        outfile = context.tmpBase + ".out"
         args += ["--append-exitstatus"]
         args += ["--redirect-output", outfile]
     stdin = cmd.stdin
@@ -64,7 +64,8 @@
 def _collectTime(context, timefiles, metric_name='exec_time'):
     time = 0.0
     for timefile in timefiles:
-        time += getUserTime(timefile)
+        filecontent = context.read_result_file(context, timefile)
+        time += getUserTimeFromContents(filecontent)
     return {metric_name: time}


@@ -79,10 +80,14 @@


 def getUserTime(filename):
-    """Extract the user time form a .time file produced by timeit"""
+    """Extract the user time from a .time file produced by timeit"""
     with open(filename) as fd:
-        l = [l for l in fd.readlines()
-             if l.startswith('user')]
+        contents = fd.read()
+    return getUserTimeFromContents(contents)
+
+
+def getUserTimeFromContents(contents):
+    l = [l for l in contents.splitlines() if l.startswith('user')]
     assert len(l) == 1
     m = re.match(r'user\s+([0-9.]+)', l[0])
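Splitting getUserTimeFromContents out of getUserTime is what lets _collectTime work on content fetched through context.read_result_file instead of a local file handle. A quick check of the extraction logic; the sample .time contents below are an assumption about the format tools/timeit writes, but the "user <seconds>" line is exactly what the patch's regex expects:

    import re

    def getUserTimeFromContents(contents):
        # Keep the single "user <seconds>" line and extract the number.
        l = [l for l in contents.splitlines() if l.startswith('user')]
        assert len(l) == 1
        m = re.match(r'user\s+([0-9.]+)', l[0])
        return float(m.group(1))

    # Assumed sample contents of a .time file:
    sample = "exit 0\nreal 1.0310\nuser 0.9221\nsys 0.1021\n"
    print(getUserTimeFromContents(sample))  # -> 0.9221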
Index: litsupport/test.py
===================================================================
--- litsupport/test.py
+++ litsupport/test.py
@@ -18,19 +18,6 @@
 NOEXE = lit.Test.ResultCode('NOEXE', True)


-class TestContext:
-    """This class is used to hold data used while constructing a testrun.
-    For example this can be used by modules modifying the commandline with
-    extra instrumentation/measurement wrappers to pass the filenames of the
-    results to a final data collection step."""
-    def __init__(self, test, litConfig, tmpDir, tmpBase):
-        self.test = test
-        self.config = test.config
-        self.litConfig = litConfig
-        self.tmpDir = tmpDir
-        self.tmpBase = tmpBase
-
-
 class TestSuiteTest(lit.formats.ShTest):
     def __init__(self):
         super(TestSuiteTest, self).__init__()
@@ -45,7 +32,8 @@
         # Parse .test file and initialize context
         tmpDir, tmpBase = lit.TestRunner.getTempPaths(test)
         lit.util.mkdir_p(os.path.dirname(tmpBase))
-        context = TestContext(test, litConfig, tmpDir, tmpBase)
+        context = litsupport.testplan.TestContext(test, litConfig, tmpDir,
+                                                  tmpBase)
         litsupport.testfile.parse(context, test.getSourcePath())

         plan = litsupport.testplan.TestPlan()
Index: litsupport/testplan.py
===================================================================
--- litsupport/testplan.py
+++ litsupport/testplan.py
@@ -22,6 +22,7 @@
         self.metricscripts = {}
         self.metric_collectors = []
         self.preparescript = []
+        self.profile_files = []
         self.profilescript = []
@@ -170,3 +171,22 @@
     """Wrapper around subprocess.check_call that logs the command."""
     logging.info(" ".join(commandline))
     return subprocess.check_call(commandline, *aargs, **dargs)
+
+
+def default_read_result_file(context, path):
+    with open(path) as fd:
+        return fd.read()
+
+
+class TestContext:
+    """This class is used to hold data used while constructing a testrun.
+    For example this can be used by modules modifying the commandline with
+    extra instrumentation/measurement wrappers to pass the filenames of the
+    results to a final data collection step."""
+    def __init__(self, test, litConfig, tmpDir, tmpBase):
+        self.test = test
+        self.config = test.config
+        self.litConfig = litConfig
+        self.tmpDir = tmpDir
+        self.tmpBase = tmpBase
+        self.read_result_file = default_read_result_file
Index: tools/CMakeLists.txt
===================================================================
--- tools/CMakeLists.txt
+++ tools/CMakeLists.txt
@@ -1,10 +1,13 @@
-# Note that we have to compile fpcmp and timeit for the host machine even when
-# cross compiling to a different target. We use custom rules doing a simple
-# "cc file.c".
+# Tools for compiling and running the benchmarks.
+#
+# Note: Tools used while running the benchmark should be (cross-)compiled
+# normally, while tools used for building the benchmark need to be built for
+# the host system (even when cross-compiling the benchmark) with
+# `llvm_add_host_executable`.
 include(Host)

-llvm_add_host_executable(build-fpcmp fpcmp fpcmp.c)
+add_executable(fpcmp fpcmp.c)

 add_custom_command(
     OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/HashProgramOutput.sh
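One direction worth noting in this hunk: fpcmp is a tool used while running the benchmarks (it compares program output during verification), so under the rule stated in the new comment it is now cross-compiled with a plain add_executable instead of being built for the host; the build-fpcmp to fpcmp dependency rename in cmake/modules/TestSuite.cmake earlier in the patch is the matching half of this change.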
+RSYNC_FLAGS+=" --exclude=\"*.o\"" +RSYNC_FLAGS+=" --exclude=\"*.a\"" +RSYNC_FLAGS+=" --exclude=\"*.time\"" +RSYNC_FLAGS+=" --exclude=\"*.cmake\"" +RSYNC_FLAGS+=" --exclude=Output/" +RSYNC_FLAGS+=" --exclude=.ninja_deps" +RSYNC_FLAGS+=" --exclude=.ninja_log" +RSYNC_FLAGS+=" --exclude=build.ninja" +RSYNC_FLAGS+=" --exclude=rules.ninja" +RSYNC_FLAGS+=" --exclude=CMakeFiles/" +#RSYNC_FLAGS+=" -v" + +set -x +eval rsync $RSYNC_FLAGS $BUILDDIR $DEVICE:/