Index: lnt/trunk/lnt/lnttool/main.py
===================================================================
--- lnt/trunk/lnt/lnttool/main.py
+++ lnt/trunk/lnt/lnttool/main.py
@@ -151,6 +151,18 @@
         lnt.util.ImportData.print_report_result(result, sys.stdout, sys.stderr,
                                                 verbose = True)
 
+
+def _print_result_url(results, verbose):
+    result_url = results.get('result_url')
+    if result_url is not None:
+        if verbose:
+            print "Results available at:", result_url
+        else:
+            print result_url
+    elif verbose:
+        print "Results available at: no URL available"
+
+
 def action_runtest(name, args):
     """run a builtin test application"""
 
@@ -193,10 +205,7 @@
         parser.error('invalid test name %r' % test_name)
 
     server_results = test_instance.run_test('%s %s' % (name, test_name), args)
-    if server_results.get('result_url'):
-        print "Results available at:", server_results['result_url']
-    else:
-        print "Results available at: no URL available"
+    _print_result_url(server_results, verbose=True)
 
 
 def action_showtests(name, args):
@@ -239,10 +248,12 @@
     from lnt.util import ServerUtil
     files = ServerUtil.submitFiles(args[0], args[1:],
                                    opts.commit, opts.verbose)
-    if opts.verbose:
-        for f in files:
+    for f in files:
+        if opts.verbose:
             lnt.util.ImportData.print_report_result(f, sys.stdout, sys.stderr,
                                                     True)
+        _print_result_url(f, opts.verbose)
+
 
 def action_update(name, args):
     """create and or auto-update the given database"""
Index: lnt/trunk/tests/SharedInputs/sample-report.json
===================================================================
--- lnt/trunk/tests/SharedInputs/sample-report.json
+++ lnt/trunk/tests/SharedInputs/sample-report.json
@@ -0,0 +1,55 @@
+{
+    "Machine": {
+        "Info": {
+            "os": "SAMPLE OS",
+            "hardware": "x86_64"
+        },
+        "Name": "LNT SAMPLE MACHINE"
+    },
+    "Run": {
+        "Start Time": "2009-11-17 02:12:25",
+        "End Time": "2009-11-17 03:44:48",
+        "Info": {
+            "run_order": "1",
+            "tag": "nts",
+            "__report_version__": "1"
+        }
+    },
+    "Tests": [
+        {
+            "Data": [
+                0.6995
+            ],
+            "Info": {},
+            "Name": "nts.SingleSource/Benchmarks/Adobe-C++/functionobjects.compile"
+        },
+        {
+            "Data": [
+                "49333a87d501b0aea2191830b66b5eec"
+            ],
+            "Info": {},
+            "Name": "nts.SingleSource/Benchmarks/Adobe-C++/functionobjects.hash"
+        },
+        {
+            "Data": [
+                2.2521
+            ],
+            "Info": {},
+            "Name": "nts.SingleSource/Benchmarks/Adobe-C++/functionobjects.exec"
+        },
+        {
+            "Data": [
+                0.1234
+            ],
+            "Info": {},
+            "Name": "nts.SingleSource/Benchmarks/Adobe-C++/functionobjects-link.compile"
+        },
+        {
+            "Data": [
+                12432.0
+            ],
+            "Info": {},
+            "Name": "nts.SingleSource/Benchmarks/Adobe-C++/functionobjects.code_size"
+        }
+    ]
+}
Index: lnt/trunk/tests/SharedInputs/server_wrapper.sh
===================================================================
--- lnt/trunk/tests/SharedInputs/server_wrapper.sh
+++ lnt/trunk/tests/SharedInputs/server_wrapper.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+# This script wraps a call to lnt runtest with a local server
+# instance. It is intended for testing full runtest invocations
+# that need a real server instance to work. Starts a server at
+# `http://localhost:9089`.
+# ./server_wrapper <instance dir> <port> <lnt command to run...>
+# Example:
+# ./server_wrapper /tmp 9089 lnt runtest nt --submit "http://localhost:9089/db_default/submitRun" --cc /bin/clang --sandbox /tmp/sandbox
+
+# First launch the server.
+
+PROGRAM="$(basename $0)"
+
+usage() {
+    echo "usage: $PROGRAM [arguments for lnt runtest]"
+    echo "e.g: $PROGRAM /tmp/ nt yes --cc /bin/clang --sandbox /tmp/sandbox"
+}
+
+error() {
+    echo "error: $PROGRAM: $*" >&2
+    usage >&2
+    exit 1
+}
+
+main() {
+    [ $# -lt 2 ] &&
+        error "not enough arguments"
+
+    local serverinstance=$1
+    local portnr=$2
+    shift 2
+
+    lnt runserver $serverinstance --hostname localhost --port $portnr >& $serverinstance/server_wrapper_runserver.log &
+    sleep 1 # Give the server some time to start.
+    local pid=$!
+
+    # Execute command.
+    eval "$@"
+    local rc=$?
+
+    kill -15 $pid
+    local kill_rc=$?
+    [ $kill_rc -ne 0 ] &&
+        error "wha happen?? $kill_rc"
+
+    wait $pid
+    exit $rc
+}
+
+main "$@"
Index: lnt/trunk/tests/lnttool/submit.py
===================================================================
--- lnt/trunk/tests/lnttool/submit.py
+++ lnt/trunk/tests/lnttool/submit.py
@@ -0,0 +1,31 @@
+# RUN: rm -rf %t.instance
+# RUN: python %{shared_inputs}/create_temp_instance.py \
+# RUN:   %s %{shared_inputs}/SmallInstance %t.instance
+# RUN: %{shared_inputs}/server_wrapper.sh %t.instance 9091 \
+# RUN:   lnt submit "http://localhost:9091/db_default/submitRun" --commit=1 \
+# RUN:   %{shared_inputs}/sample-report.json | \
+# RUN:   FileCheck %s --check-prefix=CHECK-DEFAULT
+#
+# CHECK-DEFAULT: http://localhost:9091/db_default/v4/nts/3
+#
+# RUN: rm -rf %t.instance
+# RUN: python %{shared_inputs}/create_temp_instance.py \
+# RUN:   %s %{shared_inputs}/SmallInstance %t.instance
+# RUN: %{shared_inputs}/server_wrapper.sh %t.instance 9091 \
+# RUN:   lnt submit "http://localhost:9091/db_default/submitRun" --commit=1 \
+# RUN:   %{shared_inputs}/sample-report.json -v | \
+# RUN:   FileCheck %s --check-prefix=CHECK-VERBOSE
+#
+# CHECK-VERBOSE: Import succeeded.
+# CHECK-VERBOSE: --- Tested: 10 tests --
+#
+# CHECK-VERBOSE: Imported Data
+# CHECK-VERBOSE: -------------
+# CHECK-VERBOSE: Added Machines: 1
+# CHECK-VERBOSE: Added Runs : 1
+# CHECK-VERBOSE: Added Tests : 2
+#
+# CHECK-VERBOSE: Results
+# CHECK-VERBOSE: ----------------
+# CHECK-VERBOSE: PASS : 10
+# CHECK-VERBOSE: Results available at: http://localhost:9091/db_default/v4/nts/3
Index: lnt/trunk/tests/runtest/Inputs/runtest_server_wrapper.sh
===================================================================
--- lnt/trunk/tests/runtest/Inputs/runtest_server_wrapper.sh
+++ lnt/trunk/tests/runtest/Inputs/runtest_server_wrapper.sh
@@ -1,54 +0,0 @@
-#!/bin/bash
-# This script wraps a call to lnt runtest with a local server
-# instance. It is intended for testing full runtest invocations
-# that need a real server instnace to work.
-# ./runtest_server_wrapper [arguments for lnt runtest]
-# ./runtest_server_wrapper /tmp/ nt --cc /bin/clang --sandbox /tmp/sandbox
-
-# First launch the server.
-
-PROGRAM="$(basename $0)"
-
-usage() {
-    echo "usage: $PROGRAM [arguments for lnt runtest]"
-    echo "e.g: $PROGRAM /tmp/ nt yes --cc /bin/clang --sandbox /tmp/sandbox"
-}
-
-error() {
-    echo "error: $PROGRAM: $*" >&2
-    usage >&2
-    exit 1
-}
-
-main() {
-    [ $# -lt 2 ] &&
-        error "not enough arguments"
-
-    local serverinstance=$1
-    local portnr=$4
-    lnt runserver $serverinstance --hostname localhost --port $portnr &
-    local pid=$!
-    local type=$2
-    local submit_through_url=$3
-    shift 4
-    case $submit_through_url in
-        [yY][eE][sS]|[yY])
-            submit_pointer=http://localhost:$portnr/db_default/submitRun
-            ;;
-        *)
-            submit_pointer=$serverinstance
-            ;;
-    esac
-    lnt runtest $type --submit $submit_pointer $@
-    local rc=$?
-
-    kill -15 $pid
-    local kill_rc=$?
-    [ $kill_rc -ne 0 ] &&
-        error "wha happen?? $kill_rc"
-
-    wait $pid
-    exit $rc
-}
-
-main "$@"
Index: lnt/trunk/tests/runtest/nt.py
===================================================================
--- lnt/trunk/tests/runtest/nt.py
+++ lnt/trunk/tests/runtest/nt.py
@@ -151,8 +151,9 @@
 # Check submission to a server through url works:
 # RUN: rsync -av --exclude .svn %S/Inputs/rerun_server_instance/ \
 # RUN:   %{test_exec_root}/runtest/nt_server_instance
-# RUN: %S/Inputs/runtest_server_wrapper.sh \
-# RUN:   %{test_exec_root}/runtest/nt_server_instance nt yes 9089 \
+# RUN: %{shared_inputs}/server_wrapper.sh \
+# RUN:   %{test_exec_root}/runtest/nt_server_instance 9089 \
+# RUN:   lnt runtest nt --submit "http://localhost:9089/db_default/submitRun" \
 # RUN:   --sandbox %t.SANDBOX \
 # RUN:   --test-suite %S/Inputs/rerun-test-suite1 \
 # RUN:   --cc %{shared_inputs}/FakeCompilers/clang-r154331 \
@@ -179,8 +180,9 @@
 # Check submission to a server through server instance works:
 # RUN: rsync -av --exclude .svn %S/Inputs/rerun_server_instance/ \
 # RUN:   %{test_exec_root}/runtest/nt_server_instance
-# RUN: %S/Inputs/runtest_server_wrapper.sh \
-# RUN:   %{test_exec_root}/runtest/nt_server_instance nt no 9089 \
+# RUN: %{shared_inputs}/server_wrapper.sh \
+# RUN:   %{test_exec_root}/runtest/nt_server_instance 9089 \
+# RUN:   lnt runtest nt --submit "http://localhost:9089/db_default/submitRun" \
 # RUN:   --sandbox %t.SANDBOX \
 # RUN:   --test-suite %S/Inputs/rerun-test-suite1 \
 # RUN:   --cc %{shared_inputs}/FakeCompilers/clang-r154331 \
Index: lnt/trunk/tests/runtest/rerun.py
===================================================================
--- lnt/trunk/tests/runtest/rerun.py
+++ lnt/trunk/tests/runtest/rerun.py
@@ -7,8 +7,9 @@
 # RUN: rsync -av --exclude .svn %S/Inputs/rerun_server_instance/ \
 # RUN:   %{test_exec_root}/runtest/rerun_server_instance
 # RUN: rm -f CHECK-STDOUT CHECK-STDOUT2 CHECK-STDERR CHECK-STDERR2
-# RUN: %S/Inputs/runtest_server_wrapper.sh \
-# RUN:   %{test_exec_root}/runtest/rerun_server_instance nt yes 9090 \
+# RUN: %{shared_inputs}/server_wrapper.sh \
+# RUN:   %{test_exec_root}/runtest/rerun_server_instance 9090 \
+# RUN:   lnt runtest nt --submit "http://localhost:9090/db_default/submitRun" \
 # RUN:   --sandbox %t.SANDBOX \
 # RUN:   --test-suite %S/Inputs/rerun-test-suite1 \
 # RUN:   --cc %{shared_inputs}/FakeCompilers/clang-r154331 \
@@ -32,8 +33,9 @@
 # CHECK-STDERR: submitting result to
 # CHECK-STDERR: Rerunning 0 of 69 benchmarks.
 
-# RUN: %S/Inputs/runtest_server_wrapper.sh \
-# RUN:   %{test_exec_root}/runtest/rerun_server_instance nt yes 9090 \
+# RUN: %{shared_inputs}/server_wrapper.sh \
+# RUN:   %{test_exec_root}/runtest/rerun_server_instance 9090 \
+# RUN:   lnt runtest nt --submit "http://localhost:9090/db_default/submitRun" \
 # RUN:   --sandbox %t.SANDBOX2 \
 # RUN:   --test-suite %S/Inputs/rerun-test-suite2 \
 # RUN:   --cc %{shared_inputs}/FakeCompilers/clang-r154331 \