Index: lnt/lnttool/import_data.py
===================================================================
--- lnt/lnttool/import_data.py
+++ lnt/lnttool/import_data.py
@@ -42,11 +42,12 @@
     # Get the database.
     with contextlib.closing(config.get_database(database)) as db:
+        session = db.make_session()
 
         # Load the database.
         success = True
         for file_name in files:
             result = lnt.util.ImportData.import_and_report(
-                config, database, db, file_name,
+                config, database, db, session, file_name,
                 output_format, testsuite, show_sample_count, no_email,
                 no_report, updateMachine=update_machine, mergeRun=merge)
Index: lnt/lnttool/main.py
===================================================================
--- lnt/lnttool/main.py
+++ lnt/lnttool/main.py
@@ -92,9 +92,10 @@
     import lnt.util.ImportData
     db = lnt.server.db.v4db.V4DB('sqlite:///:memory:',
                                  lnt.server.config.Config.dummy_instance())
+    session = db.make_session()
     for file in files:
         result = lnt.util.ImportData.import_and_report(
-            None, None, db, file, '', testsuite)
+            None, None, db, session, file, '', testsuite)
         lnt.util.ImportData.print_report_result(result, sys.stdout,
                                                 sys.stderr, verbose=True)
@@ -243,6 +244,7 @@
     # Get the database.
     with contextlib.closing(config.get_database(database)) as db:
+        session = db.make_session()
 
         # Get the testsuite.
         ts = db.testsuite[testsuite]
@@ -251,7 +253,7 @@
             date = datetime.datetime.utcnow()
         else:
             # Get a timestamp to use to derive the daily report to generate.
-            latest = ts.query(ts.Run).\
+            latest = session.query(ts.Run).\
                 order_by(ts.Run.start_time.desc()).limit(1).first()
 
             # If we found a run, use its start time (rounded up to the next
@@ -269,7 +271,7 @@
             day_start_offset_hours=date.hour, for_mail=True,
             num_prior_days_to_include=days,
             filter_machine_regex=filter_machine_regex)
-        report.build()
+        report.build(session)
 
         logger.info("generating HTML report...")
         ts_url = "%s/db_%s/v4/%s" \
@@ -337,6 +339,7 @@
     # Get the database.
     with contextlib.closing(config.get_database(database)) as db:
+        session = db.make_session()
 
         # Get the testsuite.
         ts = db.testsuite[testsuite]
@@ -344,9 +347,9 @@
         # Lookup the two runs.
         run_a_id = int(run_a_id)
         run_b_id = int(run_b_id)
-        run_a = ts.query(ts.Run).\
+        run_a = session.query(ts.Run).\
             filter_by(id=run_a_id).first()
-        run_b = ts.query(ts.Run).\
+        run_b = session.query(ts.Run).\
            filter_by(id=run_b_id).first()
         if run_a is None:
             logger.error("invalid run ID %r (not in database)" % (run_a_id,))
@@ -355,8 +358,8 @@
 
         # Generate the report.
         data = lnt.server.reporting.runs.generate_run_data(
-            run_b, baseurl=config.zorgURL, result=None, compare_to=run_a,
-            baseline=None, aggregation_fn=min)
+            session, run_b, baseurl=config.zorgURL, result=None,
+            compare_to=run_a, baseline=None, aggregation_fn=min)
         env = lnt.server.ui.app.create_jinja_environment()
         text_template = env.get_template('reporting/run_report.txt')
Index: lnt/lnttool/updatedb.py
===================================================================
--- lnt/lnttool/updatedb.py
+++ lnt/lnttool/updatedb.py
@@ -34,21 +34,23 @@
     # Get the database and test suite.
     with contextlib.closing(instance.get_database(database)) as db:
+        session = db.make_session()
         ts = db.testsuite[testsuite]
 
         order = None
         # Compute a list of all the runs to delete.
         if delete_order:
-            runs = ts.query(ts.Run).join(ts.Order) \
+            runs = session.query(ts.Run).join(ts.Order) \
                 .filter(ts.Order.id == delete_order).all()
         else:
-            runs = ts.query(ts.Run).filter(ts.Run.id.in_(delete_runs)).all()
+            runs = session.query(ts.Run) \
+                .filter(ts.Run.id.in_(delete_runs)).all()
 
         for run in runs:
-            ts.delete(run)
+            session.delete(run)
 
         if delete_machines:
-            machines = ts.query(ts.Machine) \
+            machines = session.query(ts.Machine) \
                 .filter(ts.Machine.name.in_(delete_machines)).all()
             for machine in machines:
-                ts.delete(machine)
+                session.delete(machine)
 
-        db.commit()
+        session.commit()
Index: lnt/lnttool/viewcomparison.py
===================================================================
--- lnt/lnttool/viewcomparison.py
+++ lnt/lnttool/viewcomparison.py
@@ -83,10 +83,11 @@
     # Import the two reports.
     with contextlib.closing(config.get_database('default')) as db:
-        r = import_and_report(config, 'default', db, report_a, '',
-                              testsuite, updateMachine=True)
-        import_and_report(config, 'default', db, report_b, '',
-                          testsuite, updateMachine=True)
+        session = db.make_session()
+        r = import_and_report(config, 'default', db, session, report_a,
+                              '', testsuite, updateMachine=True)
+        import_and_report(config, 'default', db, session, report_b,
+                          '', testsuite, updateMachine=True)
 
     # Dispatch another thread to start the webbrowser.
     comparison_url = '%s/v4/nts/2?compare_to=1' % (url,)
Index: lnt/server/db/fieldchange.py
===================================================================
--- lnt/server/db/fieldchange.py
+++ lnt/server/db/fieldchange.py
@@ -15,16 +15,16 @@
 FIELD_CHANGE_LOOKBACK = 10
 
 
-def post_submit_tasks(ts, run_id):
-    regenerate_fieldchanges_for_run(ts, run_id)
+def post_submit_tasks(session, ts, run_id):
+    regenerate_fieldchanges_for_run(ts, session, run_id)
 
 
-def delete_fieldchange(ts, change):
+def delete_fieldchange(session, ts, change):
     """Delete this field change.  Since it might be attached to a regression
     via regression indicators, fix those up too.  If this orphans a regression
     delete it as well."""
     # Find the indicators.
-    indicators = ts.query(ts.RegressionIndicator). \
+    indicators = session.query(ts.RegressionIndicator). \
         filter(ts.RegressionIndicator.field_change_id == change.id). \
         all()
     # And all the related regressions.
@@ -32,43 +32,45 @@
 
     # Remove the indicators that point to this change.
     for ind in indicators:
-        ts.delete(ind)
+        session.delete(ind)
 
     # Now we can remove the change, itself.
-    ts.delete(change)
+    session.delete(change)
 
     # We might have just created a regression with no changes.
     # If so, delete it as well.
     deleted_ids = []
     for r in regression_ids:
-        remaining = ts.query(ts.RegressionIndicator). \
+        remaining = session.query(ts.RegressionIndicator). \
            filter(ts.RegressionIndicator.regression_id == r). \
            all()
         if len(remaining) == 0:
-            r = ts.query(ts.Regression).get(r)
+            r = session.query(ts.Regression).get(r)
             logger.info("Deleting regression because it has no changes:" +
                         repr(r))
-            ts.delete(r)
+            session.delete(r)
             deleted_ids.append(r)
-    ts.commit()
+    session.commit()
     return deleted_ids
 
 
 @timed
-def regenerate_fieldchanges_for_run(ts, run_id):
+def regenerate_fieldchanges_for_run(ts, session, run_id):
     """Regenerate the set of FieldChange objects for the given run.
     """
     # Allow for potentially a few different runs, previous_runs, next_runs
     # all with the same order_id which we will aggregate together to make
     # our comparison result.
-    run = ts.getRun(run_id)
-    runs = ts.query(ts.Run). \
+    run = ts.getRun(session, run_id)
+    runs = session.query(ts.Run). \
         filter(ts.Run.order_id == run.order_id). \
         filter(ts.Run.machine_id == run.machine_id). \
         all()
 
-    previous_runs = ts.get_previous_runs_on_machine(run, FIELD_CHANGE_LOOKBACK)
-    next_runs = ts.get_next_runs_on_machine(run, FIELD_CHANGE_LOOKBACK)
+    previous_runs = ts.get_previous_runs_on_machine(session, run,
+                                                    FIELD_CHANGE_LOOKBACK)
+    next_runs = ts.get_next_runs_on_machine(session, run,
+                                            FIELD_CHANGE_LOOKBACK)
 
     # Find our start/end order.
     if previous_runs != []:
@@ -90,7 +92,7 @@
     if run_size > 50:
         logger.warning("Generating field changes for {} runs. "
                        "That will be very slow.".format(run_size))
-    runinfo = lnt.server.reporting.analysis.RunInfo(ts, runs_to_load)
+    runinfo = lnt.server.reporting.analysis.RunInfo(session, ts, runs_to_load)
 
     # Only store fieldchanges for "metric" samples like execution time;
     # not for fields with other data, e.g. hash of a binary
@@ -102,12 +104,12 @@
                 ts.Sample.get_hash_of_binary_field())
         # Try and find a matching FC and update, else create one.
         try:
-            f = ts.query(ts.FieldChange) \
+            f = session.query(ts.FieldChange) \
                .filter(ts.FieldChange.start_order == start_order) \
                .filter(ts.FieldChange.end_order == end_order) \
                .filter(ts.FieldChange.test_id == test_id) \
                .filter(ts.FieldChange.machine == run.machine) \
-                .filter(ts.FieldChange.field == field) \
+                .filter(ts.FieldChange.field_id == field.id) \
                .one()
         except sqlalchemy.orm.exc.NoResultFound:
             f = None
@@ -115,25 +117,29 @@
         if not result.is_result_performance_change() and f:
             # With more data, it's not a regression. Kill it!
             logger.info("Removing field change: {}".format(f.id))
-            deleted = delete_fieldchange(ts, f)
+            deleted = delete_fieldchange(session, ts, f)
             continue
 
         if result.is_result_performance_change() and not f:
-            test = ts.query(ts.Test).filter(ts.Test.id == test_id).one()
+            test = session.query(ts.Test) \
+                .filter(ts.Test.id == test_id) \
+                .one()
             f = ts.FieldChange(start_order=start_order,
                                end_order=run.order,
                                machine=run.machine,
                                test=test,
-                               field=field)
+                               field_id=field.id)
             # Check the rules to see if this change matters.
-            if rules.is_useful_change(ts, f):
-                ts.add(f)
+            if rules.is_useful_change(session, ts, f):
+                session.add(f)
                 try:
-                    found, new_reg = identify_related_changes(ts, f)
+                    found, new_reg = identify_related_changes(session, ts,
+                                                              f)
                 except ObjectDeletedError:
                     # This can happen from time to time.
                     # So, lets retry once.
-                    found, new_reg = identify_related_changes(ts, f)
+                    found, new_reg = identify_related_changes(session, ts,
+                                                              f)
 
                 if found:
                     logger.info("Found field change: {}".format(
@@ -144,10 +150,10 @@
             f.old_value = result.previous
             f.new_value = result.current
             f.run = run
-    ts.commit()
+    session.commit()
 
-    regressions = ts.query(ts.Regression).all()[::-1]
-    rules.post_submission_hooks(ts, regressions)
+    regressions = session.query(ts.Regression).all()[::-1]
+    rules.post_submission_hooks(session, ts, regressions)
 
 
 def is_overlaping(fc1, fc2):
@@ -176,7 +182,7 @@
 
 
 @timed
-def identify_related_changes(ts, fc):
+def identify_related_changes(session, ts, fc):
     """Can we find a home for this change in some existing regression? If a
     match is found add a regression indicator adding this change to that
     regression, otherwise create a new regression for this change.
@@ -185,14 +191,14 @@
     ranges. Then looks for changes that are similar.
""" - regressions = ts.query(ts.Regression.id) \ + regressions = session.query(ts.Regression.id) \ .filter(or_(ts.Regression.state == RegressionState.DETECTED, ts.Regression.state == RegressionState.DETECTED_FIXED)) \ .all() for regression_packed in regressions: regression_id = regression_packed[0] - regression_indicators = get_ris(ts, regression_id) + regression_indicators = get_ris(session, ts, regression_id) print "RIs:", regression_indicators for change in regression_indicators: regression_change = change.field_change @@ -204,21 +210,22 @@ confidence += percent_similar(regression_change.test.name, fc.test.name) - if regression_change.field == fc.field: + if regression_change.field_id == fc.field_id: confidence += 1.0 if confidence >= 2.0: # Matching MSG = "Found a match: {} with score {}." - regression = ts.query(ts.Regression).get(regression_id) + regression = session.query(ts.Regression) \ + .get(regression_id) logger.info(MSG.format(str(regression), confidence)) ri = ts.RegressionIndicator(regression, fc) - ts.add(ri) + session.add(ri) # Update the default title if needed. - rebuild_title(ts, regression) - ts.commit() + rebuild_title(session, ts, regression) + session.commit() return True, regression logger.info("Could not find a partner, creating new Regression for change") - new_reg = new_regression(ts, [fc.id]) + new_reg = new_regression(session, ts, [fc.id]) return False, new_reg Index: lnt/server/db/regression.py =================================================================== --- lnt/server/db/regression.py +++ lnt/server/db/regression.py @@ -35,26 +35,26 @@ ChangeData = namedtuple("ChangeData", ["ri", "cr", "run", "latest_cr"]) -def new_regression(ts, field_changes): +def new_regression(session, ts, field_changes): """Make a new regression and add to DB.""" today = datetime.date.today() MSG = "Regression of 0 benchmarks" title = MSG regression = ts.Regression(title, "", RegressionState.DETECTED) - ts.add(regression) + session.add(regression) for fc_id in field_changes: - fc = get_fieldchange(ts, fc_id) + fc = get_fieldchange(session, ts, fc_id) ri1 = ts.RegressionIndicator(regression, fc) - ts.add(ri1) - rebuild_title(ts, regression) - ts.commit() + session.add(ri1) + rebuild_title(session, ts, regression) + session.commit() return regression -def rebuild_title(ts, regression): +def rebuild_title(session, ts, regression): """Update the title of a regresson.""" if re.match("Regression of \d+ benchmarks.*", regression.title): - old_changes = ts.query(ts.RegressionIndicator) \ + old_changes = session.query(ts.RegressionIndicator) \ .filter(ts.RegressionIndicator.regression_id == regression.id) \ .all() new_size = len(old_changes) @@ -70,66 +70,68 @@ return regression -def get_all_orders_for_machine(ts, machine): +def get_all_orders_for_machine(session, ts, machine): """Get all the oredrs for this sa machine.""" - return ts.query(ts.Order) \ + return session.query(ts.Order) \ .join(ts.Run) \ .filter(ts.Run.machine_id == machine) \ .order_by(asc(ts.Order.llvm_project_revision)) \ .all() -def get_ris(ts, regression_id): - return ts.query(ts.RegressionIndicator) \ +def get_ris(session, ts, regression_id): + return session.query(ts.RegressionIndicator) \ .filter(ts.RegressionIndicator.regression_id == regression_id) \ .all() -def get_runs_for_order_and_machine(ts, order_id, machine_id): +def get_runs_for_order_and_machine(session, ts, order_id, machine_id): """Collect all the runs for a particular order/machine combo.""" - runs = ts.query(ts.Run) \ + runs = session.query(ts.Run) \ 
        .filter(ts.Run.machine_id == machine_id) \
        .filter(ts.Run.order_id == order_id) \
        .all()
     return runs
 
 
-def get_runs_of_fieldchange(ts, fc):
-    before_runs = get_runs_for_order_and_machine(ts, fc.start_order_id,
+def get_runs_of_fieldchange(session, ts, fc):
+    before_runs = get_runs_for_order_and_machine(session, ts,
+                                                 fc.start_order_id,
                                                  fc.machine_id)
-    after_runs = get_runs_for_order_and_machine(ts, fc.end_order_id,
+    after_runs = get_runs_for_order_and_machine(session, ts, fc.end_order_id,
                                                 fc.machine_id)
     return ChangeRuns(before_runs, after_runs)
 
 
-def get_current_runs_of_fieldchange(ts, fc):
-    before_runs = get_runs_for_order_and_machine(ts, fc.start_order_id,
+def get_current_runs_of_fieldchange(session, ts, fc):
+    before_runs = get_runs_for_order_and_machine(session, ts,
+                                                 fc.start_order_id,
                                                  fc.machine_id)
-    newest_order = get_all_orders_for_machine(ts, fc.machine_id)[-1]
+    newest_order = get_all_orders_for_machine(session, ts, fc.machine_id)[-1]
 
-    after_runs = get_runs_for_order_and_machine(ts, newest_order.id,
+    after_runs = get_runs_for_order_and_machine(session, ts, newest_order.id,
                                                 fc.machine_id)
     return ChangeRuns(before_runs, after_runs)
 
 
-def get_first_runs_of_fieldchange(ts, fc):
-    run = ts.query(ts.Run) \
+def get_first_runs_of_fieldchange(session, ts, fc):
+    run = session.query(ts.Run) \
        .filter(ts.Run.machine_id == fc.machine_id) \
        .filter(ts.Run.order_id == fc.end_order_id) \
        .first()
     return run
 
 
-def get_cr_for_field_change(ts, field_change, current=False):
+def get_cr_for_field_change(session, ts, field_change, current=False):
     """Given a field_change, calculate a comparison result for that change.
     And the last run."""
     if current:
-        runs = get_current_runs_of_fieldchange(ts, field_change)
+        runs = get_current_runs_of_fieldchange(session, ts, field_change)
     else:
-        runs = get_runs_of_fieldchange(ts, field_change)
+        runs = get_runs_of_fieldchange(session, ts, field_change)
     runs_all = list(runs.before)
     runs_all.extend(runs.after)
-    ri = RunInfo(ts, [r.id for r in runs_all],
+    ri = RunInfo(session, ts, [r.id for r in runs_all],
                 only_tests=[field_change.test_id])
     cr = ri.get_comparison_result(runs.after, runs.before,
                                   field_change.test.id, field_change.field,
@@ -137,6 +139,7 @@
     return cr, runs.after[0], runs_all
 
 
-def get_fieldchange(ts, fc_id):
+def get_fieldchange(session, ts, fc_id):
     """Get a fieldchange given an ID."""
-    return ts.query(ts.FieldChange).filter(ts.FieldChange.id == fc_id).one()
+    return session.query(ts.FieldChange) \
+        .filter(ts.FieldChange.id == fc_id).one()
Index: lnt/server/db/rules/rule_blacklist_benchmarks_by_name.py
===================================================================
--- lnt/server/db/rules/rule_blacklist_benchmarks_by_name.py
+++ lnt/server/db/rules/rule_blacklist_benchmarks_by_name.py
@@ -30,7 +30,7 @@
         logger.warning("Ignoring blacklist file: {}".format(path))
 
 
-def filter_by_benchmark_name(ts, field_change):
+def filter_by_benchmark_name(session, ts, field_change):
     """Is this a fieldchange we care about?
""" if ignored is None: Index: lnt/server/db/rules/rule_update_fixed_regressions.py =================================================================== --- lnt/server/db/rules/rule_update_fixed_regressions.py +++ lnt/server/db/rules/rule_update_fixed_regressions.py @@ -8,29 +8,29 @@ from lnt.testing.util.commands import timed -def _fixed_rind(ts, rind): +def _fixed_rind(session, ts, rind): """Is this regression indicator fixed?""" fc = rind.field_change if fc is None: return False - current_cr, _, _ = get_cr_for_field_change(ts, fc, current=True) + current_cr, _, _ = get_cr_for_field_change(session, ts, fc, current=True) if current_cr.pct_delta < 0.01: return True else: return False -def is_fixed(ts, regression): +def is_fixed(session, ts, regression): """Comparing the current value to the regression, is this regression now fixed? """ - r_inds = get_ris(ts, regression.id) - fixes = [_fixed_rind(ts, x) for x in r_inds] + r_inds = get_ris(session, ts, regression.id) + fixes = [_fixed_rind(session, ts, x) for x in r_inds] return all(fixes) @timed -def regression_evolution(ts, run_id): +def regression_evolution(session, ts, run_id): """Analyse regressions. If they have changes, process them. Look at each regression in state detect. Move to ignore if it is fixed. Look at each regression in state stage. Move to verify if fixed. @@ -41,7 +41,7 @@ changed = 0 evolve_states = [RegressionState.DETECTED, RegressionState.STAGED, RegressionState.ACTIVE] - regressions = ts.query(ts.Regression) \ + regressions = session.query(ts.Regression) \ .filter(ts.Regression.state.in_(evolve_states)) \ .all() @@ -50,26 +50,26 @@ active = [r for r in regressions if r.state == RegressionState.ACTIVE] for regression in detects: - if is_fixed(ts, regression): + if is_fixed(session, ts, regression): logger.info("Detected fixed regression" + str(regression)) regression.state = RegressionState.IGNORED regression.title = regression.title + " [Detected Fixed]" changed += 1 for regression in staged: - if is_fixed(ts, regression): + if is_fixed(session, ts, regression): logger.info("Staged fixed regression" + str(regression)) regression.state = RegressionState.DETECTED_FIXED regression.title = regression.title + " [Detected Fixed]" changed += 1 for regression in active: - if is_fixed(ts, regression): + if is_fixed(session, ts, regression): logger.info("Active fixed regression" + str(regression)) regression.state = RegressionState.DETECTED_FIXED regression.title = regression.title + " [Detected Fixed]" changed += 1 - ts.commit() + session.commit() logger.info("Changed the state of {} regressions".format(changed)) post_submission_hook = regression_evolution Index: lnt/server/db/rules/rule_update_profile_stats.py =================================================================== --- lnt/server/db/rules/rule_update_profile_stats.py +++ lnt/server/db/rules/rule_update_profile_stats.py @@ -10,7 +10,7 @@ import time -def update_profile_stats(ts, run_id): +def update_profile_stats(session, ts, run_id): config = ts.v4db.config history_path = os.path.join(config.profileDir, '_profile-history.json') Index: lnt/server/db/rules_manager.py =================================================================== --- lnt/server/db/rules_manager.py +++ lnt/server/db/rules_manager.py @@ -68,17 +68,17 @@ return HOOKS -def post_submission_hooks(ts, run_id): +def post_submission_hooks(session, ts, run_id): """Run all the post submission hooks on the submitted run.""" for func in HOOKS['post_submission_hook']: - func(ts, run_id) + func(session, ts, 
 
 
-def is_useful_change(ts, field_change):
+def is_useful_change(session, ts, field_change):
     """Run all the change filters. If any are false, drop this change."""
     all_filters = []
     for func in HOOKS['is_useful_change']:
-        decision = func(ts, field_change)
+        decision = func(session, ts, field_change)
         all_filters.append(decision)
     if len(all_filters) == 0:
         return True
Index: lnt/server/db/search.py
===================================================================
--- lnt/server/db/search.py
+++ lnt/server/db/search.py
@@ -1,7 +1,7 @@
 import re
 
 
-def _naive_search_for_run(ts, query, num_results, default_machine):
+def _naive_search_for_run(session, ts, query, num_results, default_machine):
     """
     This 'naive' search doesn't rely on any indexes so can be used without
     full-text search enabled. This does make it less clever however.
@@ -37,7 +37,7 @@
     if not machine_queries:
         machines = [default_machine]
     else:
-        for m in ts.query(ts.Machine).all():
+        for m in session.query(ts.Machine).all():
             if all(q in m.name for q in machine_queries):
                 machines.append(m.id)
 
@@ -50,7 +50,7 @@
     llvm_project_revision_col = \
         ts.Order.fields[llvm_project_revision_idx].column
 
-    q = ts.query(ts.Run) \
+    q = session.query(ts.Run) \
        .filter(ts.Run.machine_id.in_(machines)) \
        .filter(ts.Run.order_id == ts.Order.id) \
        .filter(llvm_project_revision_col.isnot(None))
@@ -61,7 +61,7 @@
     return q.order_by(ts.Run.id.desc()).limit(num_results).all()
 
 
-def search(ts, query,
+def search(session, ts, query,
            num_results=8, default_machine=None):
     """
     Performs a textual search for a run. The exact syntax supported depends on
@@ -77,5 +77,5 @@
     Returns a list of Run objects.
     """
-    return _naive_search_for_run(ts, query,
+    return _naive_search_for_run(session, ts, query,
                                  num_results, default_machine)
Index: lnt/server/db/testsuitedb.py
===================================================================
--- lnt/server/db/testsuitedb.py
+++ lnt/server/db/testsuitedb.py
@@ -11,7 +11,7 @@
 import aniso8601
 import sqlalchemy
-from flask import session
+import flask
 from sqlalchemy import *
 from sqlalchemy.orm import relation
 from sqlalchemy.orm.exc import ObjectDeletedError
@@ -127,7 +127,7 @@
         class Machine(self.base, ParameterizedMixin):
             __tablename__ = db_key_name + '_Machine'
 
-            DEFAULT_BASELINE_REVISION = self.v4db.baseline_revision
+            DEFAULT_BASELINE_REVISION = v4db.baseline_revision
 
             fields = self.machine_fields
             id = Column("ID", Integer, primary_key=True)
@@ -165,21 +165,22 @@
             def parameters(self, data):
                 self.parameters_data = json.dumps(sorted(data.items()))
 
-            def get_baseline_run(self):
+            def get_baseline_run(self, session):
                 ts = Machine.testsuite
-                user_baseline = ts.get_users_baseline()
+                user_baseline = ts.get_users_baseline(session)
                 if user_baseline:
                     return self.get_closest_previously_reported_run(
-                        user_baseline.order)
+                        session, user_baseline.order)
                 else:
                     mach_base = Machine.DEFAULT_BASELINE_REVISION
                     # If we have an int, convert it to a proper string.
                     if isinstance(mach_base, int):
                         mach_base = '% 7d' % mach_base
                     return self.get_closest_previously_reported_run(
-                        ts.Order(llvm_project_revision=mach_base))
+                        session, ts.Order(llvm_project_revision=mach_base))
 
-            def get_closest_previously_reported_run(self, order_to_find):
+            def get_closest_previously_reported_run(self, session,
+                                                    order_to_find):
                 """
                 Find the closest previous run to the requested order, for
                 which this machine also reported.
@@ -189,7 +190,7 @@
                 ts = Machine.testsuite
 
                 # Search for best order.
                 best_order = None
-                for order in ts.query(ts.Order).\
+                for order in session.query(ts.Order).\
                         join(ts.Run).\
                         filter(ts.Run.machine_id == self.id).distinct():
                     if order >= order_to_find and \
@@ -200,7 +201,7 @@
                 # that order.
                 closest_run = None
                 if best_order:
-                    closest_run = ts.query(ts.Run)\
+                    closest_run = session.query(ts.Run)\
                        .filter(ts.Run.machine_id == self.id)\
                        .filter(ts.Run.order_id == best_order.id)\
                        .order_by(ts.Run.start_time.desc()).first()
@@ -500,7 +501,7 @@
                 those which have a value that can be interpreted as better or
                 worse than other potential values for this field.
                 """
-                for field in self.Sample.fields:
+                for field in Sample.fields:
                     if field.type.name in ['Real', 'Integer']:
                         yield field
@@ -586,7 +587,7 @@
             test_id = Column("TestID", Integer, ForeignKey(Test.id))
             machine_id = Column("MachineID", Integer, ForeignKey(Machine.id))
             field_id = Column("FieldID", Integer,
-                              ForeignKey(self.v4db.SampleField.id))
+                              ForeignKey(testsuite.SampleField.id))
             # Could be from many runs, but most recent one is interesting.
             run_id = Column("RunID", Integer, ForeignKey(Run.id))
@@ -596,18 +597,16 @@
                                  'end_order_id==Order.id')
             test = relation(Test)
             machine = relation(Machine)
-            field = relation(self.v4db.SampleField,
-                             primaryjoin=(self.v4db.SampleField.id ==
-                                          field_id))
+            field = relation(testsuite.SampleField)
             run = relation(Run)
 
             def __init__(self, start_order, end_order, machine,
-                         test, field):
+                         test, field_id):
                 self.start_order = start_order
                 self.end_order = end_order
                 self.machine = machine
-                self.field = field
                 self.test = test
+                self.field_id = field_id
 
             def __repr__(self):
                 return '%s_%s%r' % (db_key_name, self.__class__.__name__,
@@ -749,34 +748,26 @@
         sqlalchemy.schema.Index("ix_%s_Sample_RunID_TestID" % db_key_name,
                                 Sample.run_id, Sample.test_id)
 
-        # Add several shortcut aliases, similar to the ones on the v4db.
-        self.session = self.v4db.session
-        self.add = self.v4db.add
-        self.delete = self.v4db.delete
-        self.commit = self.v4db.commit
-        self.query = self.v4db.query
-        self.rollback = self.v4db.rollback
-
         if create_tables:
             self.base.metadata.create_all(v4db.engine)
 
-    def get_baselines(self):
-        return self.query(self.Baseline).all()
+    def get_baselines(self, session):
+        return session.query(self.Baseline).all()
 
-    def get_users_baseline(self):
+    def get_users_baseline(self, session):
         try:
             baseline_key = lnt.server.ui.util.baseline_key(self.name)
-            session_baseline = session.get(baseline_key)
+            session_baseline = flask.session.get(baseline_key)
         except RuntimeError:
             # Sometimes this is called from outside the app context.
             # In that case, don't get the user's session baseline.
             return None
         if session_baseline:
-            return self.query(self.Baseline).get(session_baseline)
+            return session.query(self.Baseline).get(session_baseline)
         return None
 
-    def _getOrCreateMachine(self, machine_data, forceUpdate):
+    def _getOrCreateMachine(self, session, machine_data, forceUpdate):
         """
         _getOrCreateMachine(data, forceUpdate) -> Machine
 
@@ -795,12 +786,12 @@
         machine.parameters = machine_parameters
 
         # Look for an existing machine.
-        existing_machines = self.query(self.Machine) \
+        existing_machines = session.query(self.Machine) \
            .filter(self.Machine.name == name) \
            .order_by(self.Machine.id.desc()) \
            .all()
         if len(existing_machines) == 0:
-            self.add(machine)
+            session.add(machine)
             return machine
 
         existing = existing_machines[0]
@@ -840,7 +831,7 @@
             existing.parameters = existing_parameters
         return existing
 
-    def _getOrCreateOrder(self, run_parameters):
+    def _getOrCreateOrder(self, session, run_parameters):
         """
         _getOrCreateOrder(data) -> Order
 
@@ -851,7 +842,7 @@
         provided data argument.
         """
-        query = self.query(self.Order)
+        query = session.query(self.Order)
         order = self.Order()
 
         # First, extract all of the specified order fields.
@@ -874,11 +865,11 @@
             # linked list.
 
             # Add the new order and commit, to assign an ID.
-            self.add(order)
-            self.v4db.session.commit()
+            session.add(order)
+            session.commit()
 
             # Load all the orders.
-            orders = list(self.query(self.Order))
+            orders = list(session.query(self.Order))
 
             # Sort the objects to form the total ordering.
             orders.sort()
@@ -899,9 +890,9 @@
 
         return order
 
-    def _getOrCreateRun(self, run_data, machine, merge):
+    def _getOrCreateRun(self, session, run_data, machine, merge):
         """
-        _getOrCreateRun(run_data, machine, merge) -> Run, bool
+        _getOrCreateRun(session, run_data, machine, merge) -> Run, bool
 
         Add a new Run record from the given data (as recorded by the test
         interchange format).
@@ -929,10 +920,10 @@
         run_parameters.pop('simple_run_id', None)
 
         # Find the order record.
-        order = self._getOrCreateOrder(run_parameters)
+        order = self._getOrCreateOrder(session, run_parameters)
 
         if merge != 'append':
-            existing_runs = self.query(self.Run) \
+            existing_runs = session.query(self.Run) \
                .filter(self.Run.machine_id == machine.id) \
                .filter(self.Run.order_id == order.id) \
                .all()
@@ -942,7 +933,7 @@
                                      order.name)
                 elif merge == 'replace':
                     for run in existing_runs:
-                        self.delete(run)
+                        session.delete(run)
                 else:
                     raise ValueError('Invalid Run mergeStrategy %r' % merge)
 
@@ -970,15 +961,15 @@
         # Any remaining parameters are saved as a JSON encoded array.
         run.parameters = run_parameters
 
-        self.add(run)
+        session.add(run)
 
         return run
 
-    def _importSampleValues(self, tests_data, run, config):
+    def _importSampleValues(self, session, tests_data, run, config):
         # Load a map of all the tests, which we will extend when we find tests
         # that need to be added.
         # Downcast to str, so we match on MySQL.
         test_cache = dict((str(test.name), test)
-                          for test in self.query(self.Test))
+                          for test in session.query(self.Test))
 
         profiles = dict()
         field_dict = dict([(f.name, f) for f in self.sample_fields])
@@ -988,7 +979,7 @@
             if test is None:
                 test = self.Test(test_data['name'])
                 test_cache[name] = test
-                self.add(test)
+                session.add(test)
 
             samples = []
             for key, values in test_data.items():
@@ -1003,7 +994,7 @@
                     values = [values]
                 while len(samples) < len(values):
                     sample = self.Sample(run, test)
-                    self.add(sample)
+                    session.add(sample)
                     samples.append(sample)
                 for sample, value in zip(samples, values):
                     if key == 'profile':
@@ -1012,7 +1003,8 @@
                     else:
                         sample.set_field(field, value)
 
-    def importDataFromDict(self, data, config, updateMachine, mergeRun):
+    def importDataFromDict(self, session, data, config, updateMachine,
+                           mergeRun):
         """
         importDataFromDict(data, config, updateMachine, mergeRun) -> Run (or
         throws ValueError exception)
 
@@ -1022,26 +1014,27 @@
         like mismatching machine data or duplicate run submission with
         mergeRun == 'reject'.
""" - machine = self._getOrCreateMachine(data['machine'], updateMachine) - run = self._getOrCreateRun(data['run'], machine, mergeRun) - self._importSampleValues(data['tests'], run, config) + machine = self._getOrCreateMachine(session, data['machine'], + updateMachine) + run = self._getOrCreateRun(session, data['run'], machine, mergeRun) + self._importSampleValues(session, data['tests'], run, config) return run # Simple query support (mostly used by templates) - def machines(self, name=None): - q = self.query(self.Machine) + def machines(self, session, name=None): + q = session.query(self.Machine) if name: q = q.filter_by(name=name) return q - def getMachine(self, id): - return self.query(self.Machine).filter_by(id=id).one() + def getMachine(self, session, id): + return session.query(self.Machine).filter_by(id=id).one() - def getRun(self, id): - return self.query(self.Run).filter_by(id=id).one() + def getRun(self, session, id): + return session.query(self.Run).filter_by(id=id).one() - def get_adjacent_runs_on_machine(self, run, N, direction=-1): + def get_adjacent_runs_on_machine(self, session, run, N, direction=-1): """ get_adjacent_runs_on_machine(run, N, direction=-1) -> [Run*] @@ -1089,7 +1082,7 @@ # # FIXME: Scalability! However, pretty fast in practice, see elaborate # explanation above. - all_machine_orders = self.query(self.Order).\ + all_machine_orders = session.query(self.Order).\ join(self.Run).\ filter(self.Run.machine == run.machine).distinct().all() all_machine_orders.sort() @@ -1109,7 +1102,7 @@ if not ids_to_fetch: return [] - runs = self.query(self.Run).\ + runs = session.query(self.Run).\ filter(self.Run.machine == run.machine).\ filter(self.Run.order_id.in_(ids_to_fetch)).all() @@ -1122,26 +1115,26 @@ return runs - def get_previous_runs_on_machine(self, run, N): - return self.get_adjacent_runs_on_machine(run, N, direction=-1) + def get_previous_runs_on_machine(self, session, run, N): + return self.get_adjacent_runs_on_machine(session, run, N, direction=-1) - def get_next_runs_on_machine(self, run, N): - return self.get_adjacent_runs_on_machine(run, N, direction=1) + def get_next_runs_on_machine(self, session, run, N): + return self.get_adjacent_runs_on_machine(session, run, N, direction=1) def __repr__(self): - return "{} (on {})".format(self.name, self.v4db.path) + return "TestSuiteDB('%s')" % self.name - def getNumMachines(self): - return self.query(self.Machine).count() + def getNumMachines(self, session): + return session.query(self.Machine).count() - def getNumRuns(self): - return self.query(self.Run).count() + def getNumRuns(self, session): + return session.query(self.Run).count() - def getNumSamples(self): - return self.query(self.Sample).count() + def getNumSamples(self, session): + return session.query(self.Sample).count() - def getNumTests(self): - return self.query(self.Test).count() + def getNumTests(self, session): + return session.query(self.Test).count() def get_field_index(self, sample_field): return self.sample_field_indexes[sample_field.name] Index: lnt/server/db/v4db.py =================================================================== --- lnt/server/db/v4db.py +++ lnt/server/db/v4db.py @@ -17,6 +17,7 @@ from lnt.util import logger from lnt.server.db import testsuite +from sqlalchemy.orm import joinedload, subqueryload import lnt.server.db.util @@ -24,11 +25,8 @@ """ Wrapper object for LNT v0.4+ databases. 
""" - - _db_updated = set() _engine_lock = threading.Lock() - _engine = {} - + _engines = [] def _load_schema_file(self, session, schema_file): with open(schema_file) as schema_fd: data = yaml.load(schema_fd) @@ -55,7 +53,13 @@ fatal("Could not load schema '%s': %s\n" % (schema_file, e)) # Load schemas from database (deprecated) - ts_list = session.query(testsuite.TestSuite).all() + ts_list = session.query(testsuite.TestSuite) \ + .options(subqueryload(testsuite.TestSuite.sample_fields) + .joinedload(testsuite.SampleField.status_field)) \ + .options(joinedload(testsuite.TestSuite.order_fields)) \ + .options(joinedload(testsuite.TestSuite.run_fields)) \ + .options(joinedload(testsuite.TestSuite.machine_fields)) \ + .all() for suite in ts_list: name = suite.name if name in self.testsuite: @@ -72,55 +76,42 @@ self.path = path self.config = config self.baseline_revision = baseline_revision + connect_args = {} + if path.startswith("sqlite://"): + # Some of the background tasks keep database transactions + # open for a long time. Make it less likely to hit + # "(OperationalError) database is locked" because of that. + connect_args['timeout'] = 30 + self.engine = sqlalchemy.create_engine(path, + connect_args=connect_args) with V4DB._engine_lock: - if path not in V4DB._engine: - connect_args = {} - if path.startswith("sqlite://"): - # Some of the background tasks keep database transactions - # open for a long time. Make it less likely to hit - # "(OperationalError) database is locked" because of that. - connect_args['timeout'] = 30 - engine = sqlalchemy.create_engine(path, - connect_args=connect_args) - V4DB._engine[path] = engine - self.engine = V4DB._engine[path] + V4DB._engines.append(self.engine) # Update the database to the current version, if necessary. Only check # this once per path. - if path not in V4DB._db_updated: - lnt.server.db.migrate.update(self.engine) - V4DB._db_updated.add(path) - - self.session = sqlalchemy.orm.sessionmaker(self.engine)() - - # Add several shortcut aliases. - self.add = self.session.add - self.delete = self.session.delete - self.commit = self.session.commit - self.query = self.session.query - self.rollback = self.session.rollback - - # For parity with the usage of TestSuiteDB, we make our primary model - # classes available as instance variables. - self.SampleType = testsuite.SampleType - self.StatusKind = testsuite.StatusKind - self.TestSuite = testsuite.TestSuite - self.SampleField = testsuite.SampleField + lnt.server.db.migrate.update(self.engine) + + self.sessionmaker = sqlalchemy.orm.sessionmaker(self.engine) + session = self.make_session() self.testsuite = dict() - self._load_schemas(self.session) + self._load_schemas(session) + session.expunge_all() + session.close() def close(self): - if self.session is not None: - self.session.close() + self.engine.dispose() @staticmethod def close_all_engines(): + """Hack for async_ops. 
         with V4DB._engine_lock:
-            for key, engine in V4DB._engine.items():
+            for engine in V4DB._engines:
                 engine.dispose()
-            V4DB._engine = {}
-            V4DB._db_updated = set()
+            V4DB._engines = []
+
+    def make_session(self):
+        return self.sessionmaker()
 
     def settings(self):
         """All the settings needed to recreate this instance elsewhere."""
Index: lnt/server/instance.py
===================================================================
--- lnt/server/instance.py
+++ lnt/server/instance.py
@@ -65,11 +65,14 @@
         self.config_path = config_path
         self.config = config
         self.tmpdir = tmpdir
+        self.databases = dict()
+        for name in self.config.get_database_names():
+            self.databases[name] = self.config.get_database(name)
 
     def __del__(self):
         # If we have a temporary dir, clean it up now.
         if self.tmpdir is not None:
             shutil.rmtree(self.tmpdir)
 
-    def get_database(self, *args, **kwargs):
-        return self.config.get_database(*args, **kwargs)
+    def get_database(self, name):
+        return self.databases.get(name, None)
Index: lnt/server/reporting/analysis.py
===================================================================
--- lnt/server/reporting/analysis.py
+++ lnt/server/reporting/analysis.py
@@ -242,7 +242,7 @@
 
 
 class RunInfo(object):
-    def __init__(self, testsuite, runs_to_load,
+    def __init__(self, session, testsuite, runs_to_load,
                 aggregation_fn=stats.safe_min, confidence_lv=.05,
                 only_tests=None):
         """Get all the samples needed to build a CR.
@@ -257,26 +257,28 @@
         self.profile_map = dict()
         self.loaded_run_ids = set()
 
-        self._load_samples_for_runs(runs_to_load, only_tests)
+        self._load_samples_for_runs(session, runs_to_load, only_tests)
 
     @property
     def test_ids(self):
         return set(key[1] for key in self.sample_map.keys())
 
-    def get_sliding_runs(self, run, compare_run, num_comparison_runs=0):
+    def get_sliding_runs(self, session, run, compare_run,
+                         num_comparison_runs=0):
         """
         Get num_comparison_runs most recent runs,
         This query is expensive.
         """
         runs = [run]
         runs_prev = self.testsuite \
            .get_previous_runs_on_machine(run, num_comparison_runs)
-            .get_previous_runs_on_machine(run, num_comparison_runs)
+            .get_previous_runs_on_machine(session, run, num_comparison_runs)
         runs += runs_prev
 
         if compare_run is not None:
             compare_runs = [compare_run]
             comp_prev = self.testsuite \
-                .get_previous_runs_on_machine(compare_run, num_comparison_runs)
+                .get_previous_runs_on_machine(session, compare_run,
+                                              num_comparison_runs)
             compare_runs += comp_prev
         else:
             compare_runs = []
@@ -396,7 +398,7 @@
                                 confidence_lv=0,
                                 bigger_is_better=field.bigger_is_better)
 
-    def _load_samples_for_runs(self, run_ids, only_tests):
+    def _load_samples_for_runs(self, session, run_ids, only_tests):
         # Find the set of new runs to load.
         to_load = set(run_ids) - self.loaded_run_ids
         if not to_load:
@@ -410,7 +412,7 @@
                    self.testsuite.Sample.test_id,
                    self.testsuite.Sample.profile_id]
         columns.extend(f.column for f in self.testsuite.sample_fields)
-        q = self.testsuite.query(*columns)
+        q = session.query(*columns)
         if only_tests:
             q = q.filter(self.testsuite.Sample.test_id.in_(only_tests))
         q = q.filter(self.testsuite.Sample.run_id.in_(to_load))
Index: lnt/server/reporting/dailyreport.py
===================================================================
--- lnt/server/reporting/dailyreport.py
+++ lnt/server/reporting/dailyreport.py
@@ -190,7 +190,7 @@
             # Select a key run arbitrarily.
             return runs[0]
 
-    def build(self):
+    def build(self, session):
         ts = self.ts
 
         # Construct datetime instances for the report range.
@@ -209,7 +209,7 @@
                       for i in range(self.num_prior_days_to_include + 1)]
 
         # Find all the runs that occurred for each day slice.
-        prior_runs = [ts.query(ts.Run).
+        prior_runs = [session.query(ts.Run).
                       filter(ts.Run.start_time > prior_day).
                       filter(ts.Run.start_time <= day).all()
                      for day, prior_day in _pairs(self.prior_days)]
@@ -308,7 +308,7 @@
             return
 
         # Get the set of all tests reported in the recent runs.
-        self.reporting_tests = ts.query(ts.Test).filter(
+        self.reporting_tests = session.query(ts.Test).filter(
            sqlalchemy.sql.exists('*', sqlalchemy.sql.and_(
                ts.Sample.run_id.in_(relevant_run_ids),
                ts.Sample.test_id == ts.Test.id))).all()
@@ -318,7 +318,8 @@
            [r.id for r in less_relevant_runs]
 
         # Create a run info object.
-        sri = lnt.server.reporting.analysis.RunInfo(ts, run_ids_to_load)
+        sri = lnt.server.reporting.analysis.RunInfo(session, ts,
+                                                    run_ids_to_load)
 
         # Build the result table of tests with interesting results.
         def compute_visible_results_priority(visible_results):
Index: lnt/server/reporting/runs.py
===================================================================
--- lnt/server/reporting/runs.py
+++ lnt/server/reporting/runs.py
@@ -8,8 +8,8 @@
 import lnt.util.stats
 
 
-def generate_run_data(run, baseurl, num_comparison_runs=0, result=None,
-                      compare_to=None, baseline=None,
+def generate_run_data(session, run, baseurl, num_comparison_runs=0,
+                      result=None, compare_to=None, baseline=None,
                       aggregation_fn=lnt.util.stats.safe_min,
                       confidence_lv=.05, styles=dict(), classes=dict()):
     """
@@ -29,7 +29,7 @@
         # If a baseline has not been given, look up the run closest to
         # the default baseline revision for which this machine also
         # reported.
-        baseline = machine.get_baseline_run()
+        baseline = machine.get_baseline_run(session)
 
     # If the baseline is the same as the comparison run, ignore it.
     visible_note = None
@@ -41,10 +41,10 @@
     # Gather the runs to use for statistical data.
     comparison_start_run = compare_to or run
     comparison_window = list(ts.get_previous_runs_on_machine(
-        comparison_start_run, num_comparison_runs))
+        session, comparison_start_run, num_comparison_runs))
     if baseline:
         baseline_window = list(ts.get_previous_runs_on_machine(
-            baseline, num_comparison_runs))
+            session, baseline, num_comparison_runs))
     else:
         baseline_window = []
 
@@ -63,10 +63,10 @@
     if baseline:
         runs_to_load.add(baseline.id)
     sri = lnt.server.reporting.analysis.RunInfo(
-        ts, runs_to_load, aggregation_fn, confidence_lv)
+        session, ts, runs_to_load, aggregation_fn, confidence_lv)
 
     # Get the test names, metric fields and total test counts.
-    test_names = ts.query(ts.Test.name, ts.Test.id).\
+    test_names = session.query(ts.Test.name, ts.Test.id).\
        order_by(ts.Test.name).\
        filter(ts.Test.id.in_(sri.test_ids)).all()
     metric_fields = list(ts.Sample.get_metric_fields())
Index: lnt/server/reporting/summaryreport.py
===================================================================
--- lnt/server/reporting/summaryreport.py
+++ lnt/server/reporting/summaryreport.py
@@ -109,7 +109,7 @@
 
         self.warnings = []
 
-    def build(self):
+    def build(self, session):
         # Build a per-testsuite list of the machines that match the specified
         # patterns.
         def should_be_in_report(machine):
@@ -118,9 +118,9 @@
             for rex in self.report_machine_rexes:
                 if rex.match(machine.name):
                     return True
-        self.requested_machines = dict((ts, filter(should_be_in_report,
-                                                   ts.query(ts.Machine).all()))
-                                       for ts in self.testsuites)
+        self.requested_machines = dict(
+            (ts, filter(should_be_in_report, session.query(ts.Machine).all()))
+            for ts in self.testsuites)
         self.requested_machine_ids = dict(
            (ts, [m.id for m in machines])
            for ts, machines in self.requested_machines.items()
@@ -134,7 +134,7 @@
         runs = []
         for ts in self.testsuites:
             # Find all the orders that match.
-            result = ts.query(ts.Order.id).\
+            result = session.query(ts.Order.id).\
                filter(ts.Order.llvm_project_revision.in_(
                    orders)).all()
             ts_order_ids = [id for id, in result]
@@ -143,7 +143,7 @@
             if not ts_order_ids:
                 ts_runs = []
             else:
-                ts_runs = ts.query(ts.Run).\
+                ts_runs = session.query(ts.Run).\
                    filter(ts.Run.order_id.in_(ts_order_ids)).\
                    filter(ts.Run.machine_id.in_(
                        self.requested_machine_ids[ts])).all()
@@ -158,7 +158,7 @@
 
         # Load the tests for each testsuite.
         self.tests = dict((ts, dict((test.id, test)
-                                    for test in ts.query(ts.Test)))
+                                    for test in session.query(ts.Test)))
                          for ts in self.testsuites)
 
         # Compute the base table for aggregation.
@@ -341,7 +341,7 @@
         # Load all the samples for all runs we are interested in.
         columns = [ts.Sample.run_id, ts.Sample.test_id]
         columns.extend(f.column for f in ts.sample_fields)
-        samples = ts.query(*columns).filter(
+        samples = session.query(*columns).filter(
            ts.Sample.run_id.in_(run_id_map.keys()))
         for sample in samples:
             run = run_id_map[sample[0]]
Index: lnt/server/ui/api.py
===================================================================
--- lnt/server/ui/api.py
+++ lnt/server/ui/api.py
@@ -25,11 +25,13 @@
         g.db_info = current_app.old_config.databases.get(g.db_name)
         if g.db_info is None:
             abort(404, message="Invalid database.")
+        request.db = current_app.instance.get_database(g.db_name)
+        request.session = request.db.make_session()
 
         # Compute result.
         result = func(*args, **kwargs)
 
         # Make sure that any transactions begun by this request are finished.
-        request.get_db().rollback()
+        request.session.rollback()
 
         return result
     return wrap
@@ -90,7 +92,8 @@
    @staticmethod
    def get():
         ts = request.get_testsuite()
-        machines = ts.query(ts.Machine).all()
+        session = request.session
+        machines = session.query(ts.Machine).all()
 
         result = common_fields_factory()
         result['machines'] = machines
@@ -104,12 +107,13 @@
    @staticmethod
    def _get_machine(machine_spec):
         ts = request.get_testsuite()
+        session = request.session
         # Assume id number if machine_spec is numeric, otherwise a name.
         if machine_spec.isdigit():
-            machine = ts.query(ts.Machine) \
+            machine = session.query(ts.Machine) \
                .filter(ts.Machine.id == machine_spec).first()
         else:
-            machines = ts.query(ts.Machine) \
+            machines = session.query(ts.Machine) \
                .filter(ts.Machine.name == machine_spec).all()
             if len(machines) == 0:
                 machine = None
@@ -123,8 +127,9 @@
    @staticmethod
    def get(machine_spec):
         ts = request.get_testsuite()
+        session = request.session
         machine = Machine._get_machine(machine_spec)
-        machine_runs = ts.query(ts.Run) \
+        machine_runs = session.query(ts.Run) \
            .filter(ts.Run.machine_id == machine.id) \
            .options(joinedload(ts.Run.order)) \
            .all()
@@ -140,17 +145,18 @@
    @requires_auth_token
    def delete(machine_spec):
         ts = request.get_testsuite()
+        session = request.session
         machine = Machine._get_machine(machine_spec)
 
-        # Just saying ts.session.delete(machine) takes a long time and risks
+        # Just saying session.delete(machine) takes a long time and risks
         # running into OOM or timeout situations for machines with hundreds
         # of runs. So instead remove machine runs in chunks.
         def perform_delete(ts, machine):
-            count = ts.query(ts.Run) \
+            count = session.query(ts.Run) \
                .filter(ts.Run.machine_id == machine.id).count()
             at = 0
             while True:
-                runs = ts.query(ts.Run) \
+                runs = session.query(ts.Run) \
                    .filter(ts.Run.machine_id == machine.id) \
                    .options(joinedload(ts.Run.samples)) \
                    .options(joinedload(ts.Run.fieldchanges)) \
@@ -163,12 +169,12 @@
                 logger.info(msg)
                 yield msg + '\n'
                 for run in runs:
-                    ts.session.delete(run)
-                ts.commit()
+                    session.delete(run)
+                session.commit()
 
             machine_name = "%s:%s" % (machine.name, machine.id)
-            ts.session.delete(machine)
-            ts.commit()
+            session.delete(machine)
+            session.commit()
             msg = "Deleted machine %s" % machine_name
             logger.info(msg)
             yield msg + '\n'
@@ -184,12 +190,14 @@
         data = json.loads(request.data)
         machine_data = data['machine']
         machine.set_from_dict(machine_data)
+        session = request.session
         ts = request.get_testsuite()
-        ts.commit()
+        session.commit()
 
    @staticmethod
    @requires_auth_token
    def post(machine_spec):
+        session = request.session
         ts = request.get_testsuite()
         machine = Machine._get_machine(machine_spec)
         machine_name = "%s:%s" % (machine.name, machine.id)
@@ -201,12 +209,13 @@
             name = request.values.get('name', None)
             if name is None:
                 abort(400, msg="Expected 'name' for rename request")
-            existing = ts.query(ts.Machine).filter(ts.Machine.name == name) \
+            existing = session.query(ts.Machine) \
+                .filter(ts.Machine.name == name) \
                .first()
             if existing is not None:
                 abort(400, msg="Machine with name '%s' already exists" % name)
             machine.name = name
-            ts.session.commit()
+            session.commit()
             logger.info("Renamed machine %s to %s" % (machine_name, name))
         elif action == 'merge':
             into_id = request.values.get('into', None)
             if into_id is None:
                 abort(400, msg="Expected 'into' for merge request")
             into = Machine._get_machine(into_id)
             into_name = "%s:%s" % (into.name, into.id)
-            ts.query(ts.Run) \
+            session.query(ts.Run) \
                .filter(ts.Run.machine_id == machine.id) \
                .update({ts.Run.machine_id: into.id},
                        synchronize_session=False)
-            ts.session.expire_all()  # be safe after synchronize_session==False
+            session.expire_all()  # be safe after synchronize_session==False
             # re-query Machine so we can delete it.
             machine = Machine._get_machine(machine_spec)
-            ts.delete(machine)
-            ts.session.commit()
+            session.delete(machine)
+            session.commit()
 
             logger.info("Merged machine %s into %s" %
                        (machine_name, into_name))
             logger.info("Deleted machine %s" % machine_name)
@@ -235,9 +244,10 @@
 
    @staticmethod
    def get(run_id):
+        session = request.session
         ts = request.get_testsuite()
         try:
-            run = ts.query(ts.Run) \
+            run = session.query(ts.Run) \
                .filter(ts.Run.id == run_id) \
                .options(joinedload(ts.Run.machine)) \
                .options(joinedload(ts.Run.order)) \
@@ -249,7 +259,7 @@
         for f in ts.sample_fields:
             to_get.append(f.column)
 
-        sample_query = ts.query(*to_get) \
+        sample_query = session.query(*to_get) \
            .join(ts.Test) \
            .filter(ts.Sample.run_id == run_id) \
            .all()
@@ -267,12 +277,13 @@
    @staticmethod
    @requires_auth_token
    def delete(run_id):
+        session = request.session
         ts = request.get_testsuite()
-        run = ts.query(ts.Run).filter(ts.Run.id == run_id).first()
+        run = session.query(ts.Run).filter(ts.Run.id == run_id).first()
         if run is None:
             abort(404, msg="Did not find run " + str(run_id))
-        ts.delete(run)
-        ts.commit()
+        session.delete(run)
+        session.commit()
         logger.info("Deleted run %s" % (run_id,))
 
@@ -284,13 +295,14 @@
    @requires_auth_token
    def post():
         """Add a new run into the lnt database"""
+        session = request.session
         db = request.get_db()
         data = request.data
         updateMachine = request.values.get('update_machine', False)
         merge = request.values.get('merge', 'replace')
         result = lnt.util.ImportData.import_from_string(
-            current_app.old_config, g.db_name, db, g.testsuite_name, data,
-            updateMachine=updateMachine, mergeRun=merge)
+            current_app.old_config, g.db_name, db, session, g.testsuite_name,
+            data, updateMachine=updateMachine, mergeRun=merge)
 
         error = result['error']
         if error is not None:
@@ -315,9 +327,11 @@
 
    @staticmethod
    def get(order_id):
+        session = request.session
         ts = request.get_testsuite()
         try:
-            order = ts.query(ts.Order).filter(ts.Order.id == order_id).one()
+            order = session.query(ts.Order) \
+                .filter(ts.Order.id == order_id).one()
         except NoResultFound:
             abort(404, message="Invalid order.")
         result = common_fields_factory()
@@ -330,9 +344,10 @@
 
    @staticmethod
    def get(sample_id):
+        session = request.session
         ts = request.get_testsuite()
         try:
-            sample = ts.query(ts.Sample) \
+            sample = session.query(ts.Sample) \
                .filter(ts.Sample.id == sample_id) \
                .one()
         except NoResultFound:
@@ -349,6 +364,7 @@
    @staticmethod
    def get():
         """Get the data for a particular line in a graph."""
+        session = request.session
         ts = request.get_testsuite()
         args = request.args.to_dict(flat=False)
         # Maybe we don't need to do this?
@@ -366,7 +382,7 @@
         for f in ts.sample_fields:
             to_get.append(f.column)
 
-        q = ts.query(*to_get) \
+        q = session.query(*to_get) \
            .join(ts.Test) \
            .join(ts.Run) \
            .join(ts.Order) \
@@ -387,20 +403,21 @@
    @staticmethod
    def get(machine_id, test_id, field_index):
         """Get the data for a particular line in a graph."""
+        session = request.session
         ts = request.get_testsuite()
         # Maybe we don't need to do this?
         try:
-            machine = ts.query(ts.Machine) \
+            machine = session.query(ts.Machine) \
                .filter(ts.Machine.id == machine_id) \
                .one()
-            test = ts.query(ts.Test) \
+            test = session.query(ts.Test) \
                .filter(ts.Test.id == test_id) \
                .one()
             field = ts.sample_fields[field_index]
         except NoResultFound:
             abort(404)
 
-        q = ts.query(field.column, ts.Order.llvm_project_revision,
+        q = session.query(field.column, ts.Order.llvm_project_revision,
                      ts.Run.start_time, ts.Run.id) \
            .join(ts.Run) \
            .join(ts.Order) \
@@ -435,10 +452,11 @@
    @staticmethod
    def get(machine_id, test_id, field_index):
         """Get the regressions for a particular line in a graph."""
+        session = request.session
         ts = request.get_testsuite()
         field = ts.sample_fields[field_index]
         # Maybe we don't need to do this?
-        fcs = ts.query(ts.FieldChange) \
+        fcs = session.query(ts.FieldChange) \
            .filter(ts.FieldChange.machine_id == machine_id) \
            .filter(ts.FieldChange.test_id == test_id) \
            .filter(ts.FieldChange.field_id == field.id) \
@@ -451,10 +469,10 @@
             # If we don't find anything, lets see if we are even looking
             # for a valid thing to provide a nice error.
             try:
-                ts.query(ts.Machine) \
+                session.query(ts.Machine) \
                    .filter(ts.Machine.id == machine_id) \
                    .one()
-                ts.query(ts.Test) \
+                session.query(ts.Test) \
                    .filter(ts.Test.id == test_id) \
                    .one()
                 _ = ts.sample_fields[field_index]
@@ -462,9 +480,9 @@
                 abort(404)
             # I think we found nothing.
             return []
-        regressions = ts.query(ts.Regression.title, ts.Regression.id,
-                               ts.RegressionIndicator.field_change_id,
-                               ts.Regression.state) \
+        regressions = session.query(ts.Regression.title, ts.Regression.id,
+                                    ts.RegressionIndicator.field_change_id,
+                                    ts.Regression.state) \
            .join(ts.RegressionIndicator) \
            .filter(ts.RegressionIndicator.field_change_id.in_(fc_ids)) \
            .all()
Index: lnt/server/ui/app.py
===================================================================
--- lnt/server/ui/app.py
+++ lnt/server/ui/app.py
@@ -91,27 +91,15 @@
     # Utility Methods
 
     def get_db(self):
-        """
-        get_db() ->
-
-        Get the active database and add a logging handler if part of the
-        request arguments.
-        """
-
-        if self.db is None:
-            try:
-                self.db = current_app.old_config.get_database(g.db_name)
-            except DatabaseError:
-                self.db = current_app.old_config.get_database(g.db_name)
-
-            # Enable SQL logging with db_log.
-            #
-            # FIXME: Conditionalize on an is_production variable.
-            show_sql = bool(self.args.get('db_log') or self.form.get('db_log'))
-            if show_sql:
-                g.db_log = StringIO.StringIO()
-                logger = logging.getLogger("sqlalchemy")
-                logger.addHandler(logging.StreamHandler(g.db_log))
-
+        assert self.db is not None
+        # Enable SQL logging with db_log.
+        #
+        # FIXME: Conditionalize on an is_production variable.
+        show_sql = bool(self.args.get('db_log') or self.form.get('db_log'))
+        if show_sql:
+            g.db_log = StringIO.StringIO()
+            logger = logging.getLogger("sqlalchemy")
+            logger.addHandler(logging.StreamHandler(g.db_log))
         return self.db
 
     def get_testsuite(self):
@@ -122,7 +110,7 @@
         """
         if self.testsuite is None:
-            testsuites = self.get_db().testsuite
+            testsuites = self.db.testsuite
 
             if g.testsuite_name not in testsuites:
                 flask.abort(404)
Index: lnt/server/ui/decorators.py
===================================================================
--- lnt/server/ui/decorators.py
+++ lnt/server/ui/decorators.py
@@ -23,13 +23,15 @@
         g.db_info = current_app.old_config.databases.get(g.db_name)
         if g.db_info is None:
             abort(404)
+        request.db = current_app.instance.get_database(g.db_name)
+        request.session = request.db.make_session()
 
         # Compute result.
         result = f(**args)
 
         # Make sure that any transactions begun by this request are
         # finished.
-        request.get_db().rollback()
+        request.session.rollback()
 
         # Return result.
         return result
@@ -60,13 +62,15 @@
         g.db_info = current_app.old_config.databases.get(g.db_name)
         if g.db_info is None:
             abort(404)
+        request.db = current_app.instance.get_database(g.db_name)
+        request.session = request.db.make_session()
 
         # Compute result.
         result = f(**args)
 
         # Make sure that any transactions begun by this request are
         # finished.
-        request.get_db().rollback()
+        request.session.rollback()
 
         # Return result.
         return result
Index: lnt/server/ui/profile_views.py
===================================================================
--- lnt/server/ui/profile_views.py
+++ lnt/server/ui/profile_views.py
@@ -64,6 +64,7 @@
 
 @v4_route("/profile/ajax/getFunctions")
 def v4_profile_ajax_getFunctions():
+    session = request.session
     ts = request.get_testsuite()
     runid = request.args.get('runid')
     testid = request.args.get('testid')
@@ -72,7 +73,7 @@
     idx = 0
     tlc = {}
-    sample = ts.query(ts.Sample) \
+    sample = session.query(ts.Sample) \
        .filter(ts.Sample.run_id == runid) \
        .filter(ts.Sample.test_id == testid).first()
     if sample and sample.profile:
@@ -84,6 +85,7 @@
 
 @v4_route("/profile/ajax/getTopLevelCounters")
 def v4_profile_ajax_getTopLevelCounters():
+    session = request.session
     ts = request.get_testsuite()
     runids = request.args.get('runids').split(',')
     testid = request.args.get('testid')
@@ -93,7 +95,7 @@
     idx = 0
     tlc = {}
     for rid in runids:
-        sample = ts.query(ts.Sample) \
+        sample = session.query(ts.Sample) \
            .filter(ts.Sample.run_id == rid) \
            .filter(ts.Sample.test_id == testid).first()
         if sample and sample.profile:
@@ -111,6 +113,7 @@
 
 @v4_route("/profile/ajax/getCodeForFunction")
 def v4_profile_ajax_getCodeForFunction():
+    session = request.session
     ts = request.get_testsuite()
     runid = request.args.get('runid')
     testid = request.args.get('testid')
@@ -118,7 +121,7 @@
 
     profileDir = current_app.old_config.profileDir
 
-    sample = ts.query(ts.Sample) \
+    sample = session.query(ts.Sample) \
        .filter(ts.Sample.run_id == runid) \
        .filter(ts.Sample.test_id == testid).first()
     if not sample or not sample.profile:
@@ -139,18 +142,19 @@
 
 def v4_profile(testid, run1_id, run2_id=None):
+    session = request.session
     ts = request.get_testsuite()
     profileDir = current_app.old_config.profileDir
 
     try:
-        test = ts.query(ts.Test).filter(ts.Test.id == testid).one()
-        run1 = ts.query(ts.Run).filter(ts.Run.id == run1_id).one()
-        sample1 = ts.query(ts.Sample) \
+        test = session.query(ts.Test).filter(ts.Test.id == testid).one()
+        run1 = session.query(ts.Run).filter(ts.Run.id == run1_id).one()
+        sample1 = session.query(ts.Sample) \
            .filter(ts.Sample.run_id == run1_id) \
            .filter(ts.Sample.test_id == testid).first()
         if run2_id is not None:
-            run2 = ts.query(ts.Run).filter(ts.Run.id == run2_id).one()
-            sample2 = ts.query(ts.Sample) \
+            run2 = session.query(ts.Run).filter(ts.Run.id == run2_id).one()
+            sample2 = session.query(ts.Sample) \
                .filter(ts.Sample.run_id == run2_id) \
                .filter(ts.Sample.test_id == testid).first()
         else:
Index: lnt/server/ui/regression_views.py
===================================================================
--- lnt/server/ui/regression_views.py
+++ lnt/server/ui/regression_views.py
@@ -45,17 +45,20 @@
     name = StringField('name', validators=[DataRequired()])
 
 
-def get_fieldchange(ts, id):
-    return ts.query(ts.FieldChange).filter(ts.FieldChange.id == id).one()
+def get_fieldchange(session, ts, id):
+    return session.query(ts.FieldChange) \
Index: lnt/server/ui/profile_views.py
===================================================================
--- lnt/server/ui/profile_views.py
+++ lnt/server/ui/profile_views.py
@@ -64,6 +64,7 @@

 @v4_route("/profile/ajax/getFunctions")
 def v4_profile_ajax_getFunctions():
+    session = request.session
     ts = request.get_testsuite()
     runid = request.args.get('runid')
     testid = request.args.get('testid')
@@ -72,7 +73,7 @@
     idx = 0
     tlc = {}
-    sample = ts.query(ts.Sample) \
+    sample = session.query(ts.Sample) \
         .filter(ts.Sample.run_id == runid) \
         .filter(ts.Sample.test_id == testid).first()
     if sample and sample.profile:
@@ -84,6 +85,7 @@

 @v4_route("/profile/ajax/getTopLevelCounters")
 def v4_profile_ajax_getTopLevelCounters():
+    session = request.session
     ts = request.get_testsuite()
     runids = request.args.get('runids').split(',')
     testid = request.args.get('testid')
@@ -93,7 +95,7 @@
     idx = 0
     tlc = {}
     for rid in runids:
-        sample = ts.query(ts.Sample) \
+        sample = session.query(ts.Sample) \
             .filter(ts.Sample.run_id == rid) \
             .filter(ts.Sample.test_id == testid).first()
         if sample and sample.profile:
@@ -111,6 +113,7 @@

 @v4_route("/profile/ajax/getCodeForFunction")
 def v4_profile_ajax_getCodeForFunction():
+    session = request.session
     ts = request.get_testsuite()
     runid = request.args.get('runid')
     testid = request.args.get('testid')
@@ -118,7 +121,7 @@

     profileDir = current_app.old_config.profileDir

-    sample = ts.query(ts.Sample) \
+    sample = session.query(ts.Sample) \
         .filter(ts.Sample.run_id == runid) \
         .filter(ts.Sample.test_id == testid).first()
     if not sample or not sample.profile:
@@ -139,18 +142,19 @@

 def v4_profile(testid, run1_id, run2_id=None):
+    session = request.session
     ts = request.get_testsuite()
     profileDir = current_app.old_config.profileDir

     try:
-        test = ts.query(ts.Test).filter(ts.Test.id == testid).one()
-        run1 = ts.query(ts.Run).filter(ts.Run.id == run1_id).one()
-        sample1 = ts.query(ts.Sample) \
+        test = session.query(ts.Test).filter(ts.Test.id == testid).one()
+        run1 = session.query(ts.Run).filter(ts.Run.id == run1_id).one()
+        sample1 = session.query(ts.Sample) \
            .filter(ts.Sample.run_id == run1_id) \
            .filter(ts.Sample.test_id == testid).first()
         if run2_id is not None:
-            run2 = ts.query(ts.Run).filter(ts.Run.id == run2_id).one()
-            sample2 = ts.query(ts.Sample) \
+            run2 = session.query(ts.Run).filter(ts.Run.id == run2_id).one()
+            sample2 = session.query(ts.Sample) \
                .filter(ts.Sample.run_id == run2_id) \
                .filter(ts.Sample.test_id == testid).first()
         else:
Index: lnt/server/ui/regression_views.py
===================================================================
--- lnt/server/ui/regression_views.py
+++ lnt/server/ui/regression_views.py
@@ -45,17 +45,20 @@
     name = StringField('name', validators=[DataRequired()])

-def get_fieldchange(ts, id):
-    return ts.query(ts.FieldChange).filter(ts.FieldChange.id == id).one()
+def get_fieldchange(session, ts, id):
+    return session.query(ts.FieldChange) \
+        .filter(ts.FieldChange.id == id) \
+        .one()

 @v4_route("/regressions/new", methods=["GET", "POST"])
 def v4_new_regressions():
     form = TriagePageSelectedForm(request.form)
+    session = request.session
     ts = request.get_testsuite()
     if request.method == 'POST' and \
        request.form['btn'] == "Create New Regression":
-        regression = new_regression(ts, form.field_changes.data)
+        regression = new_regression(session, ts, form.field_changes.data)
         flash("Created " + regression.title, FLASH_SUCCESS)
         return redirect(v4_url_for(".v4_regression_list",
                         highlight=regression.id))
@@ -64,15 +67,15 @@
         ignored = []
         for fc_id in form.field_changes.data:
             ignored.append(str(fc_id))
-            fc = get_fieldchange(ts, fc_id)
+            fc = get_fieldchange(session, ts, fc_id)
             ignored_change = ts.ChangeIgnore(fc)
-            ts.add(ignored_change)
-            ts.commit()
+            session.add(ignored_change)
+            session.commit()
         flash(msg + ", ".join(ignored), FLASH_SUCCESS)

     # d = datetime.datetime.now()
     # two_weeks_ago = d - datetime.timedelta(days=14)
-    recent_fieldchange = ts.query(ts.FieldChange) \
+    recent_fieldchange = session.query(ts.FieldChange) \
         .join(ts.Test) \
         .outerjoin(ts.ChangeIgnore) \
         .filter(ts.ChangeIgnore.id.is_(None)) \
@@ -86,12 +89,13 @@
     form.field_changes.choices = list()
     for fc in recent_fieldchange:
         if fc.old_value is None:
-            cr, key_run, _ = get_cr_for_field_change(ts, fc)
+            cr, key_run, _ = get_cr_for_field_change(session, ts, fc)
         else:
             cr = PrecomputedCR(fc.old_value, fc.new_value,
                                fc.field.bigger_is_better)
             key_run = get_first_runs_of_fieldchange(ts, fc)
-        current_cr, _, _ = get_cr_for_field_change(ts, fc, current=True)
+        current_cr, _, _ = get_cr_for_field_change(session, ts, fc,
+                                                   current=True)
         crs.append(ChangeData(fc, cr, key_run, current_cr))
         form.field_changes.choices.append((fc.id, 1,))
     return render_template("v4_new_regressions.html",
@@ -100,13 +104,13 @@
                            form=form, **ts_data(ts))

-def calc_impact(ts, fcs):
+def calc_impact(session, ts, fcs):
     crs = []
     for fc in fcs:
         if fc is None:
             continue
         if fc.old_value is None:
-            cr, _, _ = get_cr_for_field_change(ts, fc)
+            cr, _, _ = get_cr_for_field_change(session, ts, fc)
         else:
             cr = PrecomputedCR(fc.old_value, fc.new_value,
                                fc.field.bigger_is_better)
@@ -137,7 +141,7 @@

 @v4_route("/regressions/", methods=["GET", "POST"])
 def v4_regression_list():
-
+    session = request.session
     ts = request.get_testsuite()
     form = MergeRegressionForm(request.form)
     machine_filter = request.args.get('machine_filter')
@@ -145,7 +149,8 @@
     # Merge requested regressions.
     if request.method == 'POST' and \
        request.form['merge_btn'] == "Merge Regressions":
-        reg_inds, regressions = _get_regressions_from_selected_form(form, ts)
+        reg_inds, regressions = _get_regressions_from_selected_form(session,
+                                                                    form, ts)
         links = []
         target = 0
         for i, r in enumerate(regressions):
@@ -153,7 +158,8 @@
                 target = i
             links.append(r.bug)

-        new_regress = new_regression(ts, [x.field_change_id for x in reg_inds])
+        new_regress = new_regression(session, ts,
+                                     [x.field_change_id for x in reg_inds])
         new_regress.state = regressions[target].state
         new_regress.title = regressions[target].title
         new_regress.bug = ' '.join(links)
@@ -161,25 +167,26 @@
             r.bug = v4_url_for(".v4_regression_detail", id=new_regress.id)
             r.title = "Merged into Regression " + str(new_regress.id)
             r.state = RegressionState.IGNORED
-        [ts.delete(x) for x in reg_inds]
+        [session.delete(x) for x in reg_inds]

-        ts.commit()
+        session.commit()
         flash("Created: " + new_regress.title, FLASH_SUCCESS)
         return redirect(v4_url_for(".v4_regression_detail",
                                    id=new_regress.id))
     # Delete requested regressions.
     if request.method == 'POST' and \
        request.form['merge_btn'] == "Delete Regressions":
-        reg_inds, regressions = _get_regressions_from_selected_form(form, ts)
+        reg_inds, regressions = _get_regressions_from_selected_form(session,
+                                                                    form, ts)
         titles = [r.title for r in regressions]
         for res_ind in reg_inds:
-            ts.delete(res_ind)
+            session.delete(res_ind)
         for reg in regressions:
-            ts.delete(reg)
-        ts.commit()
+            session.delete(reg)
+        session.commit()
         flash(' Deleted: '.join(titles), FLASH_SUCCESS)
         return redirect(v4_url_for(".v4_regression_list", state=state_filter))

-    q = ts.query(ts.Regression)
+    q = session.query(ts.Regression)
     title = "All Regressions"
     if state_filter != -1:
         q = q.filter(ts.Regression.state == state_filter)
@@ -193,7 +200,7 @@
     filtered_regressions = []
     for regression in regression_info:
-        reg_inds = ts.query(ts.RegressionIndicator) \
+        reg_inds = session.query(ts.RegressionIndicator) \
             .filter(ts.RegressionIndicator.regression_id == regression.id) \
             .all()
@@ -209,7 +216,8 @@
         form.regression_checkboxes.choices.append((regression.id, 1,))
         regression_sizes.append(len(reg_inds))
-        impacts.append(calc_impact(ts, [x.field_change for x in reg_inds]))
+        impacts.append(calc_impact(session, ts,
+                                   [x.field_change for x in reg_inds]))
         # Now guess the regression age:
         if len(reg_inds) and reg_inds[0].field_change and \
            reg_inds[0].field_change.run:
@@ -233,11 +241,11 @@
                            **ts_data(ts))

-def _get_regressions_from_selected_form(form, ts):
+def _get_regressions_from_selected_form(session, form, ts):
     regressions_id_to_merge = form.regression_checkboxes.data
-    regressions = ts.query(ts.Regression) \
+    regressions = session.query(ts.Regression) \
         .filter(ts.Regression.id.in_(regressions_id_to_merge)).all()
-    reg_inds = ts.query(ts.RegressionIndicator) \
+    reg_inds = session.query(ts.RegressionIndicator) \
         .filter(ts.RegressionIndicator.regression_id.in_(
            regressions_id_to_merge)) \
         .all()
@@ -274,11 +282,12 @@

 @v4_route("/regressions/<int:id>", methods=["GET", "POST"])
 def v4_regression_detail(id):
+    session = request.session
     ts = request.get_testsuite()
     form = EditRegressionForm(request.form)

     try:
-        regression_info = ts.query(ts.Regression) \
+        regression_info = session.query(ts.Regression) \
             .filter(ts.Regression.id == id) \
             .one()
     except NoResultFound as e:
@@ -287,7 +296,7 @@
         regression_info.title = form.title.data
         regression_info.bug = form.bug.data
         regression_info.state = form.state.data
-        ts.commit()
+        session.commit()
         flash("Updated " +
              regression_info.title, FLASH_SUCCESS)
         return redirect(v4_url_for(".v4_regression_list",
                                    highlight=regression_info.id,
@@ -295,19 +304,19 @@
     if request.method == 'POST' and \
        request.form['save_btn'] == "Split Regression":
         # For each of the regression indicators, grab their field ids.
-        res_inds = ts.query(ts.RegressionIndicator) \
+        res_inds = session.query(ts.RegressionIndicator) \
             .filter(ts.RegressionIndicator.field_change_id.in_(
                 form.field_changes.data)) \
             .all()
         fc_ids = [x.field_change_id for x in res_inds]
-        second_regression = new_regression(ts, fc_ids)
+        second_regression = new_regression(session, ts, fc_ids)
         second_regression.state = regression_info.state

         # Now remove our links to this regression.
         for res_ind in res_inds:
-            ts.delete(res_ind)
-        lnt.server.db.fieldchange.rebuild_title(ts, regression_info)
-        ts.commit()
+            session.delete(res_ind)
+        lnt.server.db.fieldchange.rebuild_title(session, ts, regression_info)
+        session.commit()
         flash("Split " + second_regression.title, FLASH_SUCCESS)
         return redirect(v4_url_for(".v4_regression_list",
                                    highlight=second_regression.id,
@@ -315,15 +324,15 @@
     if request.method == 'POST' and request.form['save_btn'] == "Delete":
         # For each of the regression indicators, grab their field ids.
         title = regression_info.title
-        res_inds = ts.query(ts.RegressionIndicator) \
+        res_inds = session.query(ts.RegressionIndicator) \
             .filter(
                 ts.RegressionIndicator.regression_id == regression_info.id) \
             .all()

         # Now remove our links to this regression.
         for res_ind in res_inds:
-            ts.delete(res_ind)
-        ts.delete(regression_info)
-        ts.commit()
+            session.delete(res_ind)
+        session.delete(regression_info)
+        session.commit()
         flash("Deleted " + title, FLASH_SUCCESS)
         return redirect(v4_url_for(".v4_regression_list",
                                    state=int(form.edit_state.data)))
@@ -333,7 +342,7 @@
     form.edit_state.data = regression_info.state
     form.title.data = regression_info.title
     form.bug.data = regression_info.bug
-    regression_indicators = ts.query(ts.RegressionIndicator) \
+    regression_indicators = session.query(ts.RegressionIndicator) \
         .filter(ts.RegressionIndicator.regression_id == id) \
         .all()
@@ -351,12 +360,13 @@
         if fc is None:
             continue
         if fc.old_value is None:
-            cr, key_run, all_runs = get_cr_for_field_change(ts, fc)
+            cr, key_run, all_runs = get_cr_for_field_change(session, ts, fc)
         else:
             cr = PrecomputedCR(fc.old_value, fc.new_value,
                                fc.field.bigger_is_better)
             key_run = get_first_runs_of_fieldchange(ts, fc)
-        current_cr, _, all_runs = get_cr_for_field_change(ts, fc, current=True)
+        current_cr, _, all_runs = get_cr_for_field_change(session, ts, fc,
+                                                          current=True)
         crs.append(ChangeData(fc, cr, key_run, current_cr))
         form.field_changes.choices.append((fc.id, checkbox_state,))
         for run in all_runs:
@@ -384,8 +394,9 @@

 @v4_route("/hook", methods=["GET"])
 def v4_hook():
+    session = request.session
     ts = request.get_testsuite()
-    rule_hooks.post_submission_hooks(ts, 0)
+    rule_hooks.post_submission_hooks(session, ts, 0)
     abort(400)
@@ -398,12 +409,13 @@
     so we must create a regression, bypassing the normal analysis.
     """
+    session = request.session
     ts = request.get_testsuite()
     field = ts.sample_fields[field_index]
     new_regression_id = 0
-    run = ts.query(ts.Run).get(run_id)
+    run = session.query(ts.Run).get(run_id)

-    runs = ts.query(ts.Run). \
+    runs = session.query(ts.Run). \
         filter(ts.Run.order_id == run.order_id). \
         filter(ts.Run.machine_id == run.machine_id). \
         all()
@@ -411,7 +423,7 @@
     if len(runs) == 0:
         abort(404)

-    previous_runs = ts.get_previous_runs_on_machine(run, 1)
+    previous_runs = ts.get_previous_runs_on_machine(session, run, 1)

     # Find our start/end order.
     if previous_runs != []:
@@ -423,46 +435,43 @@

     # Load our run data for the creation of the new fieldchanges.
     runs_to_load = [r.id for r in (runs + previous_runs)]

-    runinfo = lnt.server.reporting.analysis.RunInfo(ts, runs_to_load)
+    runinfo = lnt.server.reporting.analysis.RunInfo(session, ts, runs_to_load)

     result = runinfo.get_comparison_result(
         runs, previous_runs, test_id, field,
         ts.Sample.get_hash_of_binary_field())

     # Try and find a matching FC and update, else create one.
-    f = None
     try:
-        f = ts.query(ts.FieldChange) \
+        f = session.query(ts.FieldChange) \
            .filter(ts.FieldChange.start_order == start_order) \
            .filter(ts.FieldChange.end_order == end_order) \
            .filter(ts.FieldChange.test_id == test_id) \
            .filter(ts.FieldChange.machine == run.machine) \
-           .filter(ts.FieldChange.field == field) \
+           .filter(ts.FieldChange.field_id == field.id) \
            .one()
     except sqlalchemy.orm.exc.NoResultFound:
-        f = None
-
-    if not f:
-        test = ts.query(ts.Test).filter(ts.Test.id == test_id).one()
+        # Create one
+        test = session.query(ts.Test).filter(ts.Test.id == test_id).one()
         f = ts.FieldChange(start_order=start_order,
                            end_order=run.order,
                            machine=run.machine,
                            test=test,
-                           field=field)
-        ts.add(f)
+                           field_id=field.id)
+        session.add(f)

+    # Always update FCs with new values.
     if f:
         f.old_value = result.previous
         f.new_value = result.current
         f.run = run
-    ts.commit()
+    session.commit()

     # Make new regressions.
-    regression = new_regression(ts, [f.id])
+    regression = new_regression(session, ts, [f.id])
     regression.state = RegressionState.ACTIVE
-    ts.commit()
+    session.commit()

     logger.info("Manually created new regressions: {}".format(regression.id))
     flash("Created " + regression.title, FLASH_SUCCESS)
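Editorial note: the v4_make_regression hunk above folds the old query/if-not-found sequence into a single try/except get-or-create. The same pattern in isolation, as a hedged sketch (the model and key columns are placeholders for the LNT classes, not their actual API):

from sqlalchemy.orm.exc import NoResultFound

def get_or_create(session, model, **keys):
    try:
        obj = session.query(model).filter_by(**keys).one()
    except NoResultFound:
        # Create one; the caller fills in the values and commits.
        obj = model(**keys)
        session.add(obj)
    return obj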
Index: lnt/server/ui/templates/v4_run.html
===================================================================
--- lnt/server/ui/templates/v4_run.html
+++ lnt/server/ui/templates/v4_run.html
@@ -321,7 +321,7 @@
   {% set field_index = ts.get_field_index(field) %}
   {% set tests = [] %}
-  {% set (runs, compare_runs) = request_info.sri.get_sliding_runs(run, compare_to, request_info.num_comparison_runs) %}
+  {% set (runs, compare_runs) = request_info.sri.get_sliding_runs(session, run, compare_to, request_info.num_comparison_runs) %}
   {% for test_name,test_id in test_info %}
     {% set cr = request_info.sri.get_comparison_result(
                 runs, compare_runs, test_id, field, hash_field) %}
Index: lnt/server/ui/views.py
===================================================================
--- lnt/server/ui/views.py
+++ lnt/server/ui/views.py
@@ -15,7 +15,6 @@
 from flask import redirect
 from flask import render_template
 from flask import request, url_for
-from flask import session
 from flask_wtf import Form
 from sqlalchemy.orm import joinedload
 from sqlalchemy.orm.exc import NoResultFound
@@ -139,11 +138,12 @@
         g.testsuite_name = 'nts'

     # Get a DB connection.
+    session = request.session
     db = request.get_db()

     result = lnt.util.ImportData.import_from_string(
-        current_app.old_config, g.db_name, db, g.testsuite_name, data_value,
-        updateMachine=updateMachine, mergeRun=merge)
+        current_app.old_config, g.db_name, db, session, g.testsuite_name,
+        data_value, updateMachine=updateMachine, mergeRun=merge)

     # It is nice to have a full URL to the run, so fixup the request URL
     # here were we know more about the flask instance.
@@ -161,8 +161,8 @@
 def ts_data(ts):
     """Data about the current testsuite used by layout.html which should be
     present in most templates."""
-    baseline_id = session.get(baseline_key())
-    baselines = ts.query(ts.Baseline).all()
+    baseline_id = flask.session.get(baseline_key())
+    baselines = request.session.query(ts.Baseline).all()
     return {
         'baseline_id': baseline_id,
         'baselines': baselines,
@@ -204,11 +204,12 @@

 @v4_route("/recent_activity")
 def v4_recent_activity():
+    session = request.session
     ts = request.get_testsuite()

     # Get the most recent runs in this tag, we just arbitrarily limit to
     # looking at the last 100 submission.
-    recent_runs = ts.query(ts.Run) \
+    recent_runs = session.query(ts.Run) \
         .options(joinedload(ts.Run.order)) \
         .options(joinedload(ts.Run.machine)) \
         .order_by(ts.Run.start_time.desc()).limit(100)
@@ -237,8 +238,9 @@
     # Compute the list of associated runs, grouped by order.

     # Gather all the runs on this machine.
+    session = request.session
     ts = request.get_testsuite()
-    machines = ts.query(ts.Machine)
+    machines = session.query(ts.Machine)

     return render_template("all_machines.html", machines=machines,
                            **ts_data(ts))
@@ -247,9 +249,10 @@
 @v4_route("/machine/<int:machine_id>/latest")
 def v4_machine_latest(machine_id):
     """Return the most recent run on this machine."""
+    session = request.session
     ts = request.get_testsuite()

-    run = ts.query(ts.Run) \
+    run = session.query(ts.Run) \
         .filter(ts.Run.machine_id == machine_id) \
         .order_by(ts.Run.start_time.desc()) \
         .first()
@@ -259,14 +262,15 @@
 @v4_route("/machine/<int:machine_id>/compare")
 def v4_machine_compare(machine_id):
     """Return the most recent run on this machine."""
+    session = request.session
     ts = request.get_testsuite()
     machine_compare_to_id = int(request.args['compare_to_id'])

-    machine_1_run = ts.query(ts.Run) \
+    machine_1_run = session.query(ts.Run) \
         .filter(ts.Run.machine_id == machine_id) \
         .order_by(ts.Run.start_time.desc()) \
         .first()

-    machine_2_run = ts.query(ts.Run) \
+    machine_2_run = session.query(ts.Run) \
         .filter(ts.Run.machine_id == machine_compare_to_id) \
         .order_by(ts.Run.start_time.desc()) \
         .first()
@@ -282,11 +286,12 @@
     from lnt.server.ui import util

     # Gather all the runs on this machine.
+    session = request.session
     ts = request.get_testsuite()

     associated_runs = multidict.multidict(
         (run_order, r)
-        for r, run_order in (ts.query(ts.Run, ts.Order)
+        for r, run_order in (session.query(ts.Run, ts.Order)
                              .join(ts.Order)
                              .filter(ts.Run.machine_id == id)
                              .order_by(ts.Run.start_time.desc())))
@@ -294,7 +299,7 @@
     associated_runs.sort()

     try:
-        machine = ts.query(ts.Machine).filter(ts.Machine.id == id).one()
+        machine = session.query(ts.Machine).filter(ts.Machine.id == id).one()
     except NoResultFound:
         abort(404)
@@ -311,7 +316,7 @@
                                    run.end_time.isoformat()))
         return flask.jsonify(**json_obj)

-    machines = ts.query(ts.Machine).all()
+    machines = session.query(ts.Machine).all()
     relatives = [m for m in machines if m.name == machine.name]
     return render_template("v4_machine.html",
                            testsuite_name=g.testsuite_name,
@@ -325,9 +330,11 @@

 class V4RequestInfo(object):
     def __init__(self, run_id):
+        session = request.session
         self.db = request.get_db()
+        self.session = session
         self.ts = ts = request.get_testsuite()
-        self.run = run = ts.query(ts.Run).filter_by(id=run_id).first()
+        self.run = run = session.query(ts.Run).filter_by(id=run_id).first()
         if run is None:
             abort(404)
@@ -345,8 +352,8 @@
         self.confidence_lv = confidence_lv

         # Find the neighboring runs, by order.
-        prev_runs = list(ts.get_previous_runs_on_machine(run, N=3))
-        next_runs = list(ts.get_next_runs_on_machine(run, N=3))
+        prev_runs = list(ts.get_previous_runs_on_machine(session, run, N=3))
+        next_runs = list(ts.get_next_runs_on_machine(session, run, N=3))
         self.neighboring_runs = next_runs[::-1] + [self.run] + prev_runs

         # Select the comparison run as either the previous run, or a user
@@ -354,15 +361,19 @@
         compare_to_str = request.args.get('compare_to')
         if compare_to_str:
             compare_to_id = int(compare_to_str)
-            compare_to = ts.query(ts.Run).filter_by(id=compare_to_id).first()
+            compare_to = session.query(ts.Run) \
+                .filter_by(id=compare_to_id) \
+                .first()
             if compare_to is None:
                 flash("Comparison Run is invalid: " + compare_to_str,
                       FLASH_DANGER)
             else:
                 self.comparison_neighboring_runs = (
-                    list(ts.get_next_runs_on_machine(compare_to, N=3))[::-1] +
+                    list(ts.get_next_runs_on_machine(session, compare_to,
+                                                     N=3))[::-1] +
                     [compare_to] +
-                    list(ts.get_previous_runs_on_machine(compare_to, N=3)))
+                    list(ts.get_previous_runs_on_machine(session, compare_to,
+                                                         N=3)))
         else:
             if prev_runs:
                 compare_to = prev_runs[0]
@@ -380,7 +391,7 @@
         baseline_str = request.args.get('baseline')
         if baseline_str:
             baseline_id = int(baseline_str)
-            baseline = ts.query(ts.Run).filter_by(id=baseline_id).first()
+            baseline = session.query(ts.Run).filter_by(id=baseline_id).first()
             if baseline is None:
                 flash("Could not find baseline " + baseline_str, FLASH_DANGER)
             else:
@@ -403,7 +414,7 @@
         }

         self.data = lnt.server.reporting.runs.generate_run_data(
-            self.run, baseurl=db_url_for('.index', _external=True),
+            session, self.run, baseurl=db_url_for('.index', _external=True),
             result=None, compare_to=compare_to, baseline=baseline,
             num_comparison_runs=self.num_comparison_runs,
             aggregation_fn=self.aggregation_fn, confidence_lv=confidence_lv,
@@ -439,7 +450,7 @@
     ts = db.testsuite[tag]

     # Look for a matched run.
-    matched_run = ts.query(ts.Run).\
+    matched_run = session.query(ts.Run).\
         filter(ts.Run.simple_run_id == id).\
         first()
@@ -458,6 +469,7 @@
 def v4_run(id):
     info = V4RequestInfo(id)

+    session = info.session
     ts = info.ts
     run = info.run
@@ -495,7 +507,7 @@
     options['aggregation_fn'] = request.args.get('aggregation_fn', 'min')

     # Get the test names.
-    test_info = ts.query(ts.Test.name, ts.Test.id).\
+    test_info = session.query(ts.Test.name, ts.Test.id).\
         order_by(ts.Test.name).all()

     # Filter the list of tests by name, if requested.
@@ -507,8 +519,8 @@
     if request.args.get('json'):
         json_obj = dict()

-        sri = lnt.server.reporting.analysis.RunInfo(ts, [id])
-        reported_tests = ts.query(ts.Test.name, ts.Test.id).\
+        sri = lnt.server.reporting.analysis.RunInfo(session, ts, [id])
+        reported_tests = session.query(ts.Test.name, ts.Test.id).\
             filter(ts.Run.id == id).\
             filter(ts.Test.id.in_(sri.test_ids)).all()
         order = run.order.as_ordered_string()
@@ -553,28 +565,29 @@
 def v4_order(id):
     """Order page details order information, as well as runs that are in
     this order as well setting this run as a baseline."""
+    session = request.session
     ts = request.get_testsuite()

     form = PromoteOrderToBaseline()

     if form.validate_on_submit():
         try:
-            baseline = ts.query(ts.Baseline) \
+            baseline = session.query(ts.Baseline) \
                 .filter(ts.Baseline.order_id == id) \
                 .one()
         except NoResultFound:
             baseline = ts.Baseline()

         if form.demote.data:
-            ts.session.delete(baseline)
-            ts.session.commit()
+            session.delete(baseline)
+            session.commit()
             flash("Baseline demoted.", FLASH_SUCCESS)
         else:
             baseline.name = form.name.data
             baseline.comment = form.description.data
             baseline.order_id = id
-            ts.session.add(baseline)
-            ts.session.commit()
+            session.add(baseline)
+            session.commit()
             flash("Baseline {} updated.".format(baseline.name), FLASH_SUCCESS)
         return redirect(v4_url_for(".v4_order", id=id))
@@ -582,7 +595,7 @@
         print form.errors

     try:
-        baseline = ts.query(ts.Baseline) \
+        baseline = session.query(ts.Baseline) \
             .filter(ts.Baseline.order_id == id) \
             .one()
         form.name.data = baseline.name
@@ -591,20 +604,20 @@
         pass

     # Get the order.
-    order = ts.query(ts.Order).filter(ts.Order.id == id).first()
+    order = session.query(ts.Order).filter(ts.Order.id == id).first()
     if order is None:
         abort(404)

     previous_order = None
     if order.previous_order_id:
-        previous_order = ts.query(ts.Order) \
+        previous_order = session.query(ts.Order) \
             .filter(ts.Order.id == order.previous_order_id).one()
     next_order = None
     if order.next_order_id:
-        next_order = ts.query(ts.Order) \
+        next_order = session.query(ts.Order) \
             .filter(ts.Order.id == order.next_order_id).one()

-    runs = ts.query(ts.Run) \
+    runs = session.query(ts.Run) \
         .filter(ts.Run.order_id == id) \
         .options(joinedload(ts.Run.machine)) \
         .all()
@@ -619,12 +632,13 @@
 @v4_route("/set_baseline/<int:id>")
 def v4_set_baseline(id):
     """Update the baseline stored in the user's session."""
+    session = request.session
     ts = request.get_testsuite()
-    base = ts.query(ts.Baseline).get(id)
+    base = session.query(ts.Baseline).get(id)
     if not base:
         return abort(404)
     flash("Baseline set to " + base.name, FLASH_SUCCESS)
-    session[baseline_key()] = id
+    flask.session[baseline_key()] = id

     return redirect(get_redirect_target())
@@ -632,10 +646,11 @@
 @v4_route("/all_orders")
 def v4_all_orders():
     # Get the testsuite.
+    session = request.session
     ts = request.get_testsuite()

     # Get the orders.
-    orders = ts.query(ts.Order).all()
+    orders = session.query(ts.Order).all()

     # Order the runs totally.
     orders.sort()
@@ -648,8 +663,9 @@
     # This is an old style endpoint that treated graphs as associated with
     # runs. Redirect to the new endpoint.
+    session = request.session
     ts = request.get_testsuite()
-    run = ts.query(ts.Run).filter_by(id=id).first()
+    run = session.query(ts.Run).filter_by(id=id).first()
     if run is None:
         abort(404)
@@ -693,8 +709,9 @@
     :return: a redirect to the graph page for that sample and field.
     """
+    session = request.session
     ts = request.get_testsuite()
-    target_sample = ts.query(ts.Sample).get(sample_id)
+    target_sample = session.query(ts.Sample).get(sample_id)
     if not target_sample:
         abort(404, "Could not find sample id: {}".format(sample_id))
@@ -706,7 +723,7 @@
             break
     if not field:
         abort(400, "Could not find field {}".format(field_name))
-    field_index = field.index
+    field_index = ts.get_field_index(field)

     kwargs = {'plot.0': '{machine_id}.{test_id}.{field_id}'.format(
         machine_id=target_sample.run.machine.id,
@@ -726,25 +743,28 @@
     from lnt.util import stats
     from lnt.external.stats import stats as ext_stats

+    session = request.session
     ts = request.get_testsuite()

     switch_min_mean_local = False
-    if 'switch_min_mean_session' not in session:
-        session['switch_min_mean_session'] = False
+    if 'switch_min_mean_session' not in flask.session:
+        flask.session['switch_min_mean_session'] = False

     # Parse the view options.
     options = {'min_mean_checkbox': 'min()'}
     if 'submit' in request.args:  # user pressed a button
         if 'switch_min_mean' in request.args:  # user checked mean() checkbox
-            session['switch_min_mean_session'] = options['switch_min_mean'] = \
+            flask.session['switch_min_mean_session'] = \
+                options['switch_min_mean'] = \
                 bool(request.args.get('switch_min_mean'))
-            switch_min_mean_local = session['switch_min_mean_session']
+            switch_min_mean_local = flask.session['switch_min_mean_session']
         else:  # mean() check box is not checked
-            session['switch_min_mean_session'] = options['switch_min_mean'] = \
+            flask.session['switch_min_mean_session'] = \
+                options['switch_min_mean'] = \
                 bool(request.args.get('switch_min_mean'))
-            switch_min_mean_local = session['switch_min_mean_session']
+            switch_min_mean_local = flask.session['switch_min_mean_session']
     else:  # new page was loaded by clicking link, not submit button
         options['switch_min_mean'] = switch_min_mean_local = \
-            session['switch_min_mean_session']
+            flask.session['switch_min_mean_session']

     options['hide_lineplot'] = bool(request.args.get('hide_lineplot'))
     show_lineplot = not options['hide_lineplot']
@@ -792,8 +812,10 @@

         try:
             machine = \
-                ts.query(ts.Machine).filter(ts.Machine.id == machine_id).one()
-            test = ts.query(ts.Test).filter(ts.Test.id == test_id).one()
+                session.query(ts.Machine) \
+                .filter(ts.Machine.id == machine_id) \
+                .one()
+            test = session.query(ts.Test).filter(ts.Test.id == test_id).one()
             field = ts.sample_fields[field_index]
         except NoResultFound:
             return abort(404)
@@ -822,8 +844,9 @@
             return abort(404)

         try:
-            machine = \
-                ts.query(ts.Machine).filter(ts.Machine.id == machine_id).one()
+            machine = session.query(ts.Machine) \
+                .filter(ts.Machine.id == machine_id) \
+                .one()
         except NoResultFound:
             return abort(404)
         field = ts.sample_fields[field_index]
@@ -852,7 +875,7 @@
             return abort(400)

         try:
-            run = ts.query(ts.Run) \
+            run = session.query(ts.Run) \
                 .options(joinedload(ts.Run.machine)) \
                 .filter(ts.Run.id == run_id) \
                 .one()
@@ -869,13 +892,14 @@
     revision_range = None
     highlight_run_id = request.args.get('highlight_run')
     if show_highlight and highlight_run_id and highlight_run_id.isdigit():
-        highlight_run = ts.query(ts.Run).filter_by(
+        highlight_run = session.query(ts.Run).filter_by(
             id=int(highlight_run_id)).first()
         if highlight_run is None:
             abort(404)

         # Find the neighboring runs, by order.
-        prev_runs = list(ts.get_previous_runs_on_machine(highlight_run, N=1))
+        prev_runs = list(ts.get_previous_runs_on_machine(session,
+                                                         highlight_run, N=1))
         if prev_runs:
             start_rev = prev_runs[0].order.llvm_project_revision
             end_rev = highlight_run.order.llvm_project_revision
@@ -904,7 +928,7 @@
         # we want to load. Actually, we should just make this a single query.
         #
         # FIXME: Don't hard code field name.
-        q = ts.query(field.column, ts.Order.llvm_project_revision,
+        q = session.query(field.column, ts.Order.llvm_project_revision,
                      ts.Run.start_time, ts.Run.id) \
             .join(ts.Run).join(ts.Order) \
             .filter(ts.Run.machine_id == machine.id) \
@@ -928,8 +952,9 @@
         num_baselines = len(baseline_parameters)
         for baseline_id, (baseline, baseline_title) in \
                 enumerate(baseline_parameters):
-            q_baseline = ts.query(field.column, ts.Order.llvm_project_revision,
-                                  ts.Run.start_time, ts.Machine.name) \
+            q_baseline = session.query(field.column,
+                                       ts.Order.llvm_project_revision,
+                                       ts.Run.start_time, ts.Machine.name) \
                 .join(ts.Run).join(ts.Order).join(ts.Machine) \
                 .filter(ts.Run.id == baseline.id) \
                 .filter(ts.Sample.test == test) \
@@ -969,9 +994,9 @@
         col = (0, 0, 0)
         legend.append(LegendItem(machine, test_name, field.name, col, None))

-        q = ts.query(sqlalchemy.sql.func.min(field.column),
-                     ts.Order.llvm_project_revision,
-                     sqlalchemy.sql.func.min(ts.Run.start_time)) \
+        q = session.query(sqlalchemy.sql.func.min(field.column),
+                          ts.Order.llvm_project_revision,
+                          sqlalchemy.sql.func.min(ts.Run.start_time)) \
             .join(ts.Run).join(ts.Order).join(ts.Test) \
             .filter(ts.Run.machine_id == machine.id) \
             .filter(field.column.isnot(None)) \
@@ -1232,13 +1257,14 @@
 def v4_global_status():
     from lnt.server.ui import util

+    session = request.session
     ts = request.get_testsuite()
     metric_fields = sorted(list(ts.Sample.get_metric_fields()),
                            key=lambda f: f.name)
     fields = dict((f.name, f) for f in metric_fields)

     # Get the latest run.
-    latest = ts.query(ts.Run.start_time).\
+    latest = session.query(ts.Run.start_time).\
         order_by(ts.Run.start_time.desc()).first()

     # If we found an entry, use that.
@@ -1257,7 +1283,9 @@
     field = fields.get(request.args.get('field', None), metric_fields[0])

     # Get the list of all runs we might be interested in.
-    recent_runs = ts.query(ts.Run).filter(ts.Run.start_time > yesterday).all()
+    recent_runs = session.query(ts.Run) \
+        .filter(ts.Run.start_time > yesterday) \
+        .all()

     # Aggregate the runs by machine.
     recent_runs_by_machine = multidict.multidict()
@@ -1288,7 +1316,8 @@
         runs = recent_runs_by_machine[machine]

         # Get the baseline run for this machine.
-        baseline = machine.get_closest_previously_reported_run(revision)
+        baseline = machine.get_closest_previously_reported_run(session,
+                                                               revision)

         # Choose the "best" run to report on. We want the most recent one with
         # the most recent order.
@@ -1299,13 +1328,14 @@
         reported_run_ids.append(run.id)

     # Get the set all tests reported in the recent runs.
-    reported_tests = ts.query(ts.Test.id, ts.Test.name).filter(
+    reported_tests = session.query(ts.Test.id, ts.Test.name).filter(
         sqlalchemy.sql.exists('*', sqlalchemy.sql.and_(
             ts.Sample.run_id.in_(reported_run_ids),
             ts.Sample.test_id == ts.Test.id))).all()

     # Load all of the runs we are interested in.
-    runinfo = lnt.server.reporting.analysis.RunInfo(ts, reported_run_ids)
+    runinfo = lnt.server.reporting.analysis.RunInfo(session, ts,
+                                                    reported_run_ids)

     # Build the test matrix. This is a two dimensional table index by
     # (machine-index, test-index), where each entry is the percent change.
@@ -1344,10 +1374,11 @@
 def v4_daily_report_overview():
     # Redirect to the report for the most recent submitted run's date.
+    session = request.session
     ts = request.get_testsuite()

     # Get the latest run.
-    latest = ts.query(ts.Run).\
+    latest = session.query(ts.Run).\
         order_by(ts.Run.start_time.desc()).limit(1).first()

     # If we found a run, use its start time.
@@ -1383,6 +1414,7 @@

     filter_machine_regex = request.args.get('filter-machine-regex')

+    session = request.session
     ts = request.get_testsuite()

     # Create the report object.
@@ -1392,7 +1424,7 @@

     # Build the report.
     try:
-        report.build()
+        report.build(request.session)
     except ValueError:
         return abort(400)
@@ -1447,16 +1479,16 @@
     all_machines = set()
     all_orders = set()
     for ts in testsuites:
-        for name, in ts.query(ts.Machine.name):
+        for name, in session.query(ts.Machine.name):
             all_machines.add(name)
-        for name, in ts.query(ts.Order.llvm_project_revision):
+        for name, in session.query(ts.Order.llvm_project_revision):
             all_orders.add(name)
     all_machines = sorted(all_machines)
     all_orders = sorted(all_orders, key=to_key)

     return render_template("v4_summary_report_ui.html",
                            config=config, all_machines=all_machines,
-                           all_orders=all_orders)
+                           all_orders=all_orders, **ts_data(ts))
@@ -1543,13 +1575,14 @@
     except:
         return False

+    session = request.session
     ts = request.get_testsuite()
     query = request.args.get('q')
     l = request.args.get('l', 8)
     default_machine = request.args.get('m', None)

     assert query
-    results = lnt.server.db.search.search(ts, query, num_results=l,
+    results = lnt.server.db.search.search(session, ts, query, num_results=l,
                                           default_machine=default_machine)

     return json.dumps(
@@ -1589,12 +1622,13 @@
     """Get the baseline object from the user's current session baseline value
     or None if one is not defined.
     """
+    session = request.session
     ts = request.get_testsuite()
-    base_id = session.get(baseline_key())
+    base_id = flask.session.get(baseline_key())
     if not base_id:
         return None
     try:
-        base = ts.query(ts.Baseline).get(base_id)
+        base = session.query(ts.Baseline).get(base_id)
     except NoResultFound:
         return None
     return base
@@ -1610,6 +1644,7 @@
     the parameters, and is ignored.
     """
+    session = request.session
     ts = request.get_testsuite()

     # Load the matrix request parameters.
     form = MatrixOptions(request.form)
@@ -1637,12 +1672,13 @@
         return abort(404, "Invalid field index: {}".format(field_index))

     try:
-        machine = \
-            ts.query(ts.Machine).filter(ts.Machine.id == machine_id).one()
+        machine = session.query(ts.Machine) \
+            .filter(ts.Machine.id == machine_id) \
+            .one()
     except NoResultFound:
         return abort(404, "Invalid machine ID: {}".format(machine_id))
     try:
-        test = ts.query(ts.Test).filter(ts.Test.id == test_id).one()
+        test = session.query(ts.Test).filter(ts.Test.id == test_id).one()
     except NoResultFound:
         return abort(404, "Invalid test ID: {}".format(test_id))
     try:
@@ -1674,8 +1710,8 @@
     all_orders = set()
     order_to_id = {}
     for req in data_parameters:
-        q = ts.query(req.field.column, ts.Order.llvm_project_revision,
-                     ts.Order.id) \
+        q = session.query(req.field.column, ts.Order.llvm_project_revision,
+                          ts.Order.id) \
            .join(ts.Run) \
            .join(ts.Order) \
            .filter(ts.Run.machine_id == req.machine.id) \
@@ -1712,8 +1748,9 @@
         baseline_name = backup_baseline

     for req in data_parameters:
-        q_baseline = ts.query(req.field.column, ts.Order.llvm_project_revision,
-                              ts.Order.id) \
+        q_baseline = session.query(req.field.column,
+                                   ts.Order.llvm_project_revision,
+                                   ts.Order.id) \
            .join(ts.Run) \
            .join(ts.Order) \
            .filter(ts.Run.machine_id == req.machine.id) \
@@ -1781,7 +1818,7 @@
                        curr_geomean, False)

     # Calculate the date of each order.
-    runs = ts.query(ts.Run.start_time, ts.Order.llvm_project_revision) \
+    runs = session.query(ts.Run.start_time, ts.Order.llvm_project_revision) \
         .join(ts.Order) \
         .filter(ts.Order.llvm_project_revision.in_(all_orders)) \
         .all()
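Editorial note: a recurring detail in the views.py hunks above is that `session` used to mean flask.session (the signed cookie); the import was dropped so the bare name can now mean the database Session, and cookie accesses are spelled flask.session explicitly. A small sketch of the two living side by side (set_baseline_sketch and the baseline_key argument are illustrative, not the LNT view itself):

import flask
from flask import request

def set_baseline_sketch(ts, baseline_id, baseline_key):
    # Database session: validate that the baseline exists.
    base = request.session.query(ts.Baseline).get(baseline_id)
    if base is None:
        flask.abort(404)
    # Cookie session: remember the user's per-browser choice.
    flask.session[baseline_key] = baseline_id
    return base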
Index: lnt/tests/builtintest.py
===================================================================
--- lnt/tests/builtintest.py
+++ lnt/tests/builtintest.py
@@ -10,7 +10,6 @@
 import lnt.util.ServerUtil as ServerUtil
 import lnt.util.ImportData as ImportData
 import lnt.server.config as server_config
-import lnt.server.db.v4db

 class OptsContainer(object):
@@ -78,7 +77,7 @@
                 config.submit_url, report_path, config.verbose,
                 updateMachine=config.update_machine, mergeRun=config.merge)
         else:
-            server_report = lnt.util.ImportData.no_submit()
+            server_report = ImportData.no_submit()
         if server_report:
             ImportData.print_report_result(server_report, sys.stdout,
                                            sys.stderr, config.verbose)
Index: lnt/tests/nt.py
===================================================================
--- lnt/tests/nt.py
+++ lnt/tests/nt.py
@@ -1744,8 +1744,9 @@
     import lnt.server.config
     db = lnt.server.db.v4db.V4DB("sqlite:///:memory:",
                                  lnt.server.config.Config.dummy_instance())
+    session = db.make_session()
     result = lnt.util.ImportData.import_and_report(
-        None, None, db, report_path, 'json', 'nts')
+        None, None, db, session, report_path, 'json', 'nts')

     if result is None:
         fatal("Results were not obtained from submission.")
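Editorial note: callers of import_and_report (whose diff follows) now construct the Session themselves, as the nt.py hunk above shows. A hedged end-to-end sketch of the new calling convention against a throwaway in-memory instance (import_one_report is an illustrative driver, not LNT API):

import contextlib

import lnt.server.config
import lnt.server.db.v4db
import lnt.util.ImportData

def import_one_report(report_path):
    db = lnt.server.db.v4db.V4DB("sqlite:///:memory:",
                                 lnt.server.config.Config.dummy_instance())
    with contextlib.closing(db):
        session = db.make_session()  # explicit, caller-owned session
        return lnt.util.ImportData.import_and_report(
            None, None, db, session, report_path, 'json', 'nts')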
Index: lnt/util/ImportData.py
===================================================================
--- lnt/util/ImportData.py
+++ lnt/util/ImportData.py
@@ -12,7 +12,7 @@
 import time

-def import_and_report(config, db_name, db, file, format, ts_name,
+def import_and_report(config, db_name, db, session, file, format, ts_name,
                       show_sample_count=False, disable_email=False,
                       disable_report=False, updateMachine=False,
                       mergeRun='replace'):
@@ -32,18 +32,18 @@
         'import_file': file,
     }

-    ts = db.testsuite.get(ts_name)
+    ts = db.testsuite.get(ts_name, None)
     if ts is None:
         result['error'] = "Unknown test suite '%s'!" % ts_name
         return result
-    numMachines = ts.getNumMachines()
-    numRuns = ts.getNumRuns()
-    numTests = ts.getNumTests()
+    numMachines = ts.getNumMachines(session)
+    numRuns = ts.getNumRuns(session)
+    numTests = ts.getNumTests(session)

     # If the database gets fragmented, count(*) in SQLite can get really
     # slow!?!
     if show_sample_count:
-        numSamples = ts.getNumSamples()
+        numSamples = ts.getNumSamples(session)

     startTime = time.time()
     try:
@@ -88,7 +88,7 @@
                             (data_schema, ts_name))
             return result

-        run = ts.importDataFromDict(data, config=db_config,
+        run = ts.importDataFromDict(session, data, config=db_config,
                                     updateMachine=updateMachine,
                                     mergeRun=mergeRun)
     except KeyboardInterrupt:
@@ -117,18 +117,18 @@
     if not disable_report:
         # This has the side effect of building the run report for
         # this result.
-        NTEmailReport.emailReport(result, db, run, report_url, email_config,
-                                  toAddress, True)
+        NTEmailReport.emailReport(result, session, run, report_url,
+                                  email_config, toAddress, True)

-    result['added_machines'] = ts.getNumMachines() - numMachines
-    result['added_runs'] = ts.getNumRuns() - numRuns
-    result['added_tests'] = ts.getNumTests() - numTests
+    result['added_machines'] = ts.getNumMachines(session) - numMachines
+    result['added_runs'] = ts.getNumRuns(session) - numRuns
+    result['added_tests'] = ts.getNumTests(session) - numTests
     if show_sample_count:
-        result['added_samples'] = ts.getNumSamples() - numSamples
+        result['added_samples'] = ts.getNumSamples(session) - numSamples

     result['committed'] = True
     result['run_id'] = run.id
-    ts.commit()
+    session.commit()
     if db_config:
         # If we are not in a dummy instance, also run background jobs.
         # We have to have a commit before we run, so subprocesses can
@@ -152,8 +152,10 @@
                            "database %r does not exist" % shadow_name)

         # Perform the shadow import.
+        shadow_session = shadow_db.make_session()
         shadow_result = import_and_report(config, shadow_name,
-                                          shadow_db, file, format, ts_name,
+                                          shadow_db, shadow_session, file,
+                                          format, ts_name,
                                           show_sample_count, disable_email,
                                           disable_report, updateMachine)
@@ -301,8 +303,8 @@
         print >>out, kind, ":", count

-def import_from_string(config, db_name, db, ts_name, data, updateMachine=False,
-                       mergeRun='replace'):
+def import_from_string(config, db_name, db, session, ts_name, data,
+                       updateMachine=False, mergeRun='replace'):
     # Stash a copy of the raw submission.
     #
     # To keep the temporary directory organized, we keep files in
@@ -330,6 +332,6 @@
     # should at least reject overly large inputs.
     result = lnt.util.ImportData.import_and_report(
-        config, db_name, db, path, '', ts_name,
+        config, db_name, db, session, path, '', ts_name,
         updateMachine=updateMachine, mergeRun=mergeRun)
     return result
Index: lnt/util/NTEmailReport.py
===================================================================
--- lnt/util/NTEmailReport.py
+++ lnt/util/NTEmailReport.py
@@ -4,15 +4,15 @@
 import urllib
 import StringIO

-import lnt.server.db.v4db
 import lnt.server.reporting.runs

-def emailReport(result, db, run, baseurl, email_config, to, was_added=True):
+def emailReport(result, session, run, baseurl, email_config, to,
+                was_added=True):
     import email.mime.multipart
     import email.mime.text

-    subject, report, html_report = _getReport(result, db, run, baseurl,
+    subject, report, html_report = _getReport(result, session, run, baseurl,
                                               was_added)

     # Ignore if no to address was given, we do things this way because of the
@@ -44,11 +44,9 @@
     s.quit()

-def _getReport(result, db, run, baseurl, was_added, compare_to=None):
-    assert isinstance(db, lnt.server.db.v4db.V4DB)
-
+def _getReport(result, session, run, baseurl, was_added, compare_to=None):
     data = lnt.server.reporting.runs.generate_run_data(
-        run, baseurl=baseurl, result=result, compare_to=compare_to,
+        session, run, baseurl=baseurl, result=result, compare_to=compare_to,
         num_comparison_runs=10)

     env = lnt.server.ui.app.create_jinja_environment()
Index: lnt/util/ServerUtil.py
===================================================================
--- lnt/util/ServerUtil.py
+++ lnt/util/ServerUtil.py
@@ -72,8 +72,9 @@
     with contextlib.closing(config.get_database(db_name)) as db:
         if db is None:
             raise ValueError("no default database in instance: %r" % (path,))
+        session = db.make_session()
         return lnt.util.ImportData.import_and_report(
-            config, db_name, db, file, format='', ts_name='nts',
+            config, db_name, db, session, file, format='', ts_name='nts',
             updateMachine=updateMachine, mergeRun=mergeRun)

Index: lnt/util/async_ops.py
===================================================================
--- lnt/util/async_ops.py
+++ lnt/util/async_ops.py
@@ -106,6 +106,7 @@
     job.start()
     JOBS.append(job)

+
 # Flag to track if we have disposed of the parent's database connections in
 # this subprocess.
 clean_db = False
@@ -124,15 +125,18 @@
     if not clean_db:
         lnt.server.db.v4db.V4DB.close_all_engines()
         clean_db = True
+    sleep(3)

     logger.info("Running async wrapper: {} ".format(job.__name__) +
                 str(os.getpid()))

     config = ts_args['db_info']
-    _v4db = config.get_database(ts_args['db'])
-    # with contextlib.closing(_v4db) as db:
-    ts = _v4db.testsuite[ts_args['tsname']]
-    nothing = job(ts, **func_args)
-    assert nothing is None
+    db = config.get_database(ts_args['db'])
+    with contextlib.closing(db):
+        session = db.make_session()
+        ts = db.testsuite[ts_args['tsname']]
+        nothing = job(ts, session, **func_args)
+        assert nothing is None
+        session.close()
     end_time = time.time()
     delta = end_time-start_time
     msg = "Finished: {name} in {time:.2f}s ".format(name=job.__name__,
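Editorial note: the async_ops hunk restores the contextlib.closing discipline that the commented-out code had lost. The forked worker discards the engines inherited from the parent process, then opens its own database and a session scoped to the job. A condensed sketch with the argument plumbing simplified (run_job is an illustrative name; close_all_engines and get_database are the calls used above):

import contextlib

import lnt.server.db.v4db

def run_job(config, job, db_name, ts_name, **func_args):
    # Engines forked from the parent share its sockets; drop them first.
    lnt.server.db.v4db.V4DB.close_all_engines()
    db = config.get_database(db_name)
    with contextlib.closing(db):
        session = db.make_session()
        try:
            job(db.testsuite[ts_name], session, **func_args)
        finally:
            session.close()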
Index: tests/server/db/CreateV4TestSuite.py
===================================================================
--- tests/server/db/CreateV4TestSuite.py
+++ tests/server/db/CreateV4TestSuite.py
@@ -12,13 +12,14 @@

 # Create an in memory database.
 db = v4db.V4DB("sqlite:///:memory:", Config.dummy_instance())
+session = db.make_session()

 # We expect exactly the NTS test suite.
-test_suites = list(db.query(testsuite.TestSuite))
+test_suites = list(session.query(testsuite.TestSuite))
 assert len(test_suites) == 1

 # Check the NTS test suite.
-ts = db.query(testsuite.TestSuite).filter_by(name="nts").first()
+ts = session.query(testsuite.TestSuite).filter_by(name="nts").first()
 assert ts.name == "nts"
 assert ts.db_key_name == "NT"
 assert len(ts.machine_fields) == 2
Index: tests/server/db/CreateV4TestSuiteInstance.py
===================================================================
--- tests/server/db/CreateV4TestSuiteInstance.py
+++ tests/server/db/CreateV4TestSuiteInstance.py
@@ -12,6 +12,7 @@

 # Create an in memory database.
 db = v4db.V4DB("sqlite:///:memory:", Config.dummy_instance())
+session = db.make_session()

 # Get the test suite wrapper.
 ts_db = db.testsuite['nts']
@@ -42,63 +43,63 @@
 sample.mem_bytes = 58093568

 # Add and commit.
-ts_db.add(machine)
-ts_db.add(order)
-ts_db.add(order2)
-ts_db.add(order3)
+session.add(machine)
+session.add(order)
+session.add(order2)
+session.add(order3)

-ts_db.add(run)
-ts_db.add(test)
-ts_db.add(sample)
+session.add(run)
+session.add(test)
+session.add(sample)

 field_change = ts_db.FieldChange(order, order2, machine, test,
-                                 list(sample.get_primary_fields())[0])
+                                 list(sample.get_primary_fields())[0].id)

-ts_db.add(field_change)
+session.add(field_change)

 field_change2 = ts_db.FieldChange(order2, order3, machine, test,
-                                  list(sample.get_primary_fields())[1])
-ts_db.add(field_change2)
+                                  list(sample.get_primary_fields())[1].id)
+session.add(field_change2)

 TEST_TITLE = "Some regression title"
 regression = ts_db.Regression(TEST_TITLE, "PR1234", RegressionState.DETECTED)
-ts_db.add(regression)
+session.add(regression)

 regression_indicator1 = ts_db.RegressionIndicator(regression, field_change)
 regression_indicator2 = ts_db.RegressionIndicator(regression, field_change2)

-ts_db.add(regression_indicator1)
-ts_db.add(regression_indicator2)
+session.add(regression_indicator1)
+session.add(regression_indicator2)

-ts_db.commit()
+session.commit()

 del machine, order, run, test, sample

 # Fetch the added objects.
-machines = ts_db.query(ts_db.Machine).all()
+machines = session.query(ts_db.Machine).all()
 assert len(machines) == 1
 machine = machines[0]

-orders = ts_db.query(ts_db.Order).all()
+orders = session.query(ts_db.Order).all()
 assert len(orders) == 3
 order = orders[0]

-runs = ts_db.query(ts_db.Run).all()
+runs = session.query(ts_db.Run).all()
 assert len(runs) == 1
 run = runs[0]

-tests = ts_db.query(ts_db.Test).all()
+tests = session.query(ts_db.Test).all()
 assert len(tests) == 1
 test = tests[0]

-samples = ts_db.query(ts_db.Sample).all()
+samples = session.query(ts_db.Sample).all()
 assert len(samples) == 1
 sample = samples[0]

 assert sample.code_size == 100

-regression_indicators = ts_db.query(ts_db.RegressionIndicator).all()
+regression_indicators = session.query(ts_db.RegressionIndicator).all()
 assert len(regression_indicators) == 2
 ri = regression_indicators[0]
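Editorial note: the test updates above replace every ts_db.add/commit/query with the explicit session. For readers unfamiliar with the underlying API, an equivalent add/commit/query round-trip in plain SQLAlchemy (toy Machine model, not the LNT schema):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Machine(Base):
    __tablename__ = 'machine'
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)
session = sessionmaker(engine)()

# Unit of work: stage the object, then commit through the session.
session.add(Machine(name="test-machine"))
session.commit()

machines = session.query(Machine).all()
assert len(machines) == 1 and machines[0].name == "test-machine"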
Index: tests/server/db/ImportV4TestSuiteInstance.py
===================================================================
--- tests/server/db/ImportV4TestSuiteInstance.py
+++ tests/server/db/ImportV4TestSuiteInstance.py
@@ -70,20 +70,24 @@

 # Load the test database.
 db = v4db.V4DB("sqlite:///%s" % sys.argv[1], Config.dummy_instance())
+session = db.make_session()

 # Get the status kinds, and validate the IDs align with the testing IDs.
-pass_kind = db.query(db.StatusKind).filter_by(id = lnt.testing.PASS).one()
+pass_kind = session.query(testsuite.StatusKind) \
+    .filter_by(id = lnt.testing.PASS).one()
 assert pass_kind.name == "PASS"
-fail_kind = db.query(db.StatusKind).filter_by(id = lnt.testing.FAIL).one()
+fail_kind = session.query(testsuite.StatusKind) \
+    .filter_by(id = lnt.testing.FAIL).one()
 assert fail_kind.name == "FAIL"
-xfail_kind = db.query(db.StatusKind).filter_by(id = lnt.testing.XFAIL).one()
+xfail_kind = session.query(testsuite.StatusKind) \
+    .filter_by(id = lnt.testing.XFAIL).one()
 assert xfail_kind.name == "XFAIL"

 # Load the imported test suite.
 ts = db.testsuite['nts']

 # Validate the machine.
-machines = list(ts.query(ts.Machine))
+machines = list(session.query(ts.Machine))
 assert len(machines) == 1
 machine = machines[0]
 assert machine.name == 'LNT SAMPLE MACHINE'
@@ -94,13 +98,13 @@
 assert parameters['extrakey'] == u'extravalue'

 # Validate the tests.
-tests = list(ts.query(ts.Test))
+tests = list(session.query(ts.Test))
 assert len(tests) == 1
 test = tests[0]
 assert tests[0].name == 'sampletest'

 # Validate the orders.
-orders = list(ts.query(ts.Order).order_by(ts.Order.llvm_project_revision))
+orders = list(session.query(ts.Order).order_by(ts.Order.llvm_project_revision))
 assert len(orders) == 2
 order_a,order_b = orders
 print order_a
@@ -113,7 +117,7 @@
 assert order_b.llvm_project_revision == '2'

 # Validate the runs.
-runs = list(ts.query(ts.Run).order_by(ts.Run.order_id))
+runs = list(session.query(ts.Run).order_by(ts.Run.order_id))
 assert len(runs) == 2
 run_a,run_b = runs
 assert run_a.machine is machine
@@ -128,7 +132,7 @@
 assert sorted(run_b.parameters.items()) == [('inferred_run_order', '2')]

 # Validate the samples.
-samples = list(ts.query(ts.Sample)\
+samples = list(session.query(ts.Sample)\
                .join(ts.Run) \
                .order_by(ts.Run.order_id, ts.Sample.id))
 assert len(samples) == 3
Index: tests/server/db/blacklist.py
===================================================================
--- tests/server/db/blacklist.py
+++ tests/server/db/blacklist.py
@@ -27,10 +27,10 @@
 class BlacklistProcessingTest(unittest.TestCase):
     """Test the Rules facility."""

-    def _mkorder(self, ts, rev):
+    def _mkorder(self, session, ts, rev):
         order = ts.Order()
         order.llvm_project_revision = rev
-        ts.add(order)
+        session.add(order)
         return order

     def setUp(self):
@@ -45,18 +45,22 @@
         app.preprocess_request()
         r.g.db_name = "default"
         r.g.testsuite_name = "nts"
+        r.request.db = app.instance.get_database(r.g.db_name)
+        r.request.session = r.request.db.make_session()
         self.ts = r.request.get_testsuite()
         self.ts_db = self.ts
+        self.session = r.request.session
+        session = self.session

         ts_db = self.ts_db
-        order1234 = self.order1234 = self._mkorder(ts_db, "1234")
-        order1236 = self.order1236 = self._mkorder(ts_db, "1236")
+        order1234 = self.order1234 = self._mkorder(session, ts_db, "1234")
+        order1236 = self.order1236 = self._mkorder(session, ts_db, "1236")
         machine = self.machine = ts_db.Machine("test-machine")
-        ts_db.add(machine)
+        session.add(machine)

         a_field = ts_db.Sample.fields[0]
-        ts_db.commit()
+        session.commit()

         test = self.test = ts_db.Test("Foo")
         test2 = self.test2 = ts_db.Test("SingleSource/Foo/Bar/baz")
@@ -67,44 +71,45 @@
                                                 order1236,
                                                 machine,
                                                 test,
-                                                a_field)
+                                                a_field.id)
         self.field_change2 = ts_db.FieldChange(order1234,
                                                order1236,
                                                machine,
                                                test2,
-                                               a_field)
+                                               a_field.id)
         self.field_change3 = ts_db.FieldChange(order1234,
                                                order1236,
                                                machine,
                                                test3,
-                                               a_field)
+                                               a_field.id)
         self.field_change4 = ts_db.FieldChange(order1234,
                                                order1236,
                                                machine,
                                                test4,
-                                               a_field)
-        ts_db.add(self.field_change1)
-        ts_db.add(self.field_change2)
-        ts_db.add(self.field_change3)
-        ts_db.add(self.field_change4)
+                                               a_field.id)
+        session.add(self.field_change1)
+        session.add(self.field_change2)
+        session.add(self.field_change3)
+        session.add(self.field_change4)

-        ts_db.commit()
+        session.commit()

     def test_blacklist(self):
         """Check we filter by benchmark name correctly."""
+        session = self.session
         ts = self.ts_db
         fc1 = self.field_change1
         fc2 = self.field_change2
         fc3 = self.field_change3
         fc4 = self.field_change4
-        valid = blacklist.filter_by_benchmark_name(ts, fc1)
+        valid = blacklist.filter_by_benchmark_name(session, ts, fc1)
         self.assertTrue(valid, "Expect this to not be filtered.")

-        valid = blacklist.filter_by_benchmark_name(ts, fc2)
+        valid = blacklist.filter_by_benchmark_name(session, ts, fc2)
        self.assertTrue(valid, "Expect this to not be filtered.")

-        bad = blacklist.filter_by_benchmark_name(ts, fc3)
+        bad = blacklist.filter_by_benchmark_name(session, ts, fc3)
         self.assertFalse(bad, "Expect this to be filtered by regex.")

-        bad = blacklist.filter_by_benchmark_name(ts, fc4)
+        bad = blacklist.filter_by_benchmark_name(session, ts, fc4)
         self.assertFalse(bad, "Expect this to be filtered by blacklist.")

 if __name__ == '__main__':
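Editorial note: the blacklist setUp above is the testing counterpart of the decorator change. With no HTTP request in flight, the test pushes a Flask test request context and attaches db/session to the request by hand, mirroring what the decorators do in production. The same steps as a helper, assuming an LNT app created via App.create_standalone as in the test (make_test_session is an illustrative name):

def make_test_session(app, db_name="default"):
    """Simulate what the v4 decorators do for a real request."""
    with app.test_request_context() as r:
        app.preprocess_request()
        r.request.db = app.instance.get_database(db_name)
        r.request.session = r.request.db.make_session()
        return r.request.session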
Index: tests/server/db/search.py
===================================================================
--- tests/server/db/search.py
+++ tests/server/db/search.py
@@ -32,6 +32,7 @@

         # Get the database.
         self.db = config.get_database('default')
+        self.session = self.db.make_session()

         # Load the database.
         for r in imported_runs:
             with tempfile.NamedTemporaryFile() as f:
@@ -42,7 +43,7 @@
                 open(f.name, 'w').write(data)

                 result = lnt.util.ImportData.import_and_report(
-                    None, 'default', self.db, f.name,
+                    None, 'default', self.db, self.session, f.name,
                     format='', ts_name='nts', show_sample_count=False,
                     disable_email=True, disable_report=True,
                     updateMachine=False, mergeRun='reject')
@@ -54,43 +55,47 @@
                 for r in rs]

     def test_specific(self):
+        session = self.session
         ts = self.db.testsuite.get('nts')

-        results = self._mangleResults(search(ts, 'machine1 #5625'))
+        results = self._mangleResults(search(session, ts, 'machine1 #5625'))
         self.assertEqual(results, [
             ('machine1', '5625')
         ])

-        results = self._mangleResults(search(ts, 'machine1 #5624'))
+        results = self._mangleResults(search(session, ts, 'machine1 #5624'))
         self.assertEqual(results, [
             ('machine1', '5624')
         ])

     def test_multiple_orders(self):
+        session = self.session
         ts = self.db.testsuite.get('nts')

-        results = self._mangleResults(search(ts, 'machine1 #56'))
+        results = self._mangleResults(search(session, ts, 'machine1 #56'))
         self.assertEqual(results, [
             ('machine1', '5625'),
             ('machine1', '5624')
         ])

     def test_nohash(self):
+        session = self.session
         ts = self.db.testsuite.get('nts')

-        results = self._mangleResults(search(ts, 'machine1 r56'))
+        results = self._mangleResults(search(session, ts, 'machine1 r56'))
         self.assertEqual(results, [
             ('machine1', '5625'),
             ('machine1', '5624')
         ])

-        results = self._mangleResults(search(ts, 'machine1 56'))
+        results = self._mangleResults(search(session, ts, 'machine1 56'))
         self.assertEqual(results, [
             ('machine1', '5625'),
             ('machine1', '5624')
         ])

     def test_default_order(self):
+        session = self.session
         ts = self.db.testsuite.get('nts')

-        results = self._mangleResults(search(ts, 'machi ne3'))
+        results = self._mangleResults(search(session, ts, 'machi ne3'))
         self.assertEqual(results, [
             ('machine3', '11324'),
             ('machine3', '7623'),
@@ -99,9 +104,11 @@
         ])

     def test_default_machine(self):
+        session = self.session
         ts = self.db.testsuite.get('nts')

-        results = self._mangleResults(search(ts, '65', default_machine=3))
+        results = self._mangleResults(search(session, ts, '65',
+                                             default_machine=3))
         self.assertEqual(results, [
             ('machine2', '6512')
         ])
Index: tests/server/ui/change_processing.py
===================================================================
--- tests/server/ui/change_processing.py
+++ tests/server/ui/change_processing.py
@@ -22,10 +22,10 @@
 logging.basicConfig(level=logging.DEBUG)

-def _mkorder(ts, rev):
+def _mkorder(session, ts, rev):
     order = ts.Order()
     order.llvm_project_revision = rev
-    ts.add(order)
+    session.add(order)
     return order
@@ -36,40 +36,41 @@
         """Bind to the LNT test instance."""
         self.db = v4db.V4DB("sqlite:///:memory:",
                             Config.dummy_instance())
+        session = self.session = self.db.make_session()

         # Get the test suite wrapper.
         ts_db = self.ts_db = self.db.testsuite['nts']

-        order1234 = self.order1234 = _mkorder(ts_db, "1234")
-        order1235 = self.order1235 = _mkorder(ts_db, "1235")
-        order1236 = self.order1236 = _mkorder(ts_db, "1236")
-        order1237 = self.order1237 = _mkorder(ts_db, "1237")
-        order1238 = self.order1238 = _mkorder(ts_db, "1238")
+        order1234 = self.order1234 = _mkorder(session, ts_db, "1234")
+        order1235 = self.order1235 = _mkorder(session, ts_db, "1235")
+        order1236 = self.order1236 = _mkorder(session, ts_db, "1236")
+        order1237 = self.order1237 = _mkorder(session, ts_db, "1237")
+        order1238 = self.order1238 = _mkorder(session, ts_db, "1238")

         start_time = end_time = datetime.datetime.utcnow()

         machine = self.machine = ts_db.Machine("test-machine")
-        ts_db.add(machine)
+        session.add(machine)
         test = self.test = ts_db.Test("foo")
-        ts_db.add(test)
+        session.add(test)

         machine2 = self.machine2 = ts_db.Machine("test-machine2")
-        ts_db.add(machine2)
+        session.add(machine2)
         test2 = self.test2 = ts_db.Test("bar")
-        ts_db.add(test2)
+        session.add(test2)

         run = self.run = ts_db.Run(machine, order1235, start_time,
                                    end_time)
-        ts_db.add(run)
+        session.add(run)
         run2 = self.run2 = ts_db.Run(machine2, order1235, start_time,
                                      end_time)
-        ts_db.add(run2)
+        session.add(run2)

         sample = ts_db.Sample(run, test, compile_time=1.0, score=4.2)
-        ts_db.add(sample)
+        session.add(sample)

         a_field = self.a_field = list(sample.get_primary_fields())[0]
         self.a_field2 = list(sample.get_primary_fields())[1]
@@ -78,53 +79,54 @@
                                               order1236,
                                               machine,
                                               test,
-                                              a_field)
+                                              a_field.id)
         field_change.run = run
-        ts_db.add(field_change)
+        session.add(field_change)

         fc_mach2 = ts_db.FieldChange(order1234,
                                      order1236,
                                      machine2,
                                      test,
-                                     a_field)
+                                     a_field.id)
         fc_mach2.run = run2
-        ts_db.add(fc_mach2)
+        session.add(fc_mach2)

         field_change2 = self.field_change2 = ts_db.FieldChange(order1235,
                                                                order1236,
                                                                machine,
                                                                test,
-                                                               a_field)
+                                                               a_field.id)
         field_change2.run = run
-        ts_db.add(field_change2)
+        session.add(field_change2)

         field_change3 = self.field_change3 = ts_db.FieldChange(order1237,
                                                                order1238,
                                                                machine,
                                                                test,
-                                                               a_field)
-        ts_db.add(field_change3)
+                                                               a_field.id)
+        session.add(field_change3)

         regression = self.regression = ts_db.Regression("Regression of 1 benchmarks:",
                                                         "PR1234",
                                                         RegressionState.DETECTED)
-        ts_db.add(self.regression)
+        session.add(self.regression)

         self.regression_indicator1 = ts_db.RegressionIndicator(regression,
                                                                field_change)
         self.regression_indicator2 = ts_db.RegressionIndicator(regression,
                                                                field_change2)
-        ts_db.add(self.regression_indicator1)
-        ts_db.add(self.regression_indicator2)
+        session.add(self.regression_indicator1)
+        session.add(self.regression_indicator2)

         # All the regressions we detected.
         self.regressions = [regression]

-        ts_db.commit()
+        session.commit()

     def tearDown(self):
-        self.db.close_all_engines()
+        self.db.close()

     def test_startup(self):
         pass

     def test_change_grouping_criteria(self):
+        session = self.session
         ts_db = self.ts_db

         # Check simple overlap checks work.
@@ -134,21 +136,21 @@
                          "Should not be overlapping")

         # Check non-overlapping changes are always False.
-        ret, reg = identify_related_changes(ts_db, self.field_change3)
+        ret, reg = identify_related_changes(session, ts_db, self.field_change3)
         self.assertFalse(ret, "Ranges don't overlap, should not match")
         self.regressions.append(reg)

         # Check a regression matches if all fields match.
-        ret, _ = identify_related_changes(ts_db, self.field_change)
+        ret, _ = identify_related_changes(session, ts_db, self.field_change)
         self.assertTrue(ret, "Should Match.")

         field_change7 = ts_db.FieldChange(self.order1234,
                                           self.order1235,
                                           self.machine2,
                                           self.test2,
-                                          self.a_field)
-        ts_db.add(field_change7)
-        ret, reg = identify_related_changes(ts_db, field_change7)
+                                          self.a_field.id)
+        session.add(field_change7)
+        ret, reg = identify_related_changes(session, ts_db, field_change7)
         self.assertNotEquals(self.regression, reg)
         self.assertFalse(ret, "No match with different machine and tests.")
         self.regressions.append(reg)
@@ -156,61 +158,66 @@
                                           self.order1235,
                                           self.machine2,
                                           self.test,
-                                          self.a_field)
+                                          self.a_field.id)

         # Check a regression matches if all fields match.
-        ret, _ = identify_related_changes(ts_db, field_change4)
+        ret, _ = identify_related_changes(session, ts_db, field_change4)
         self.assertTrue(ret, "Should Match with different machine.")

         field_change5 = ts_db.FieldChange(self.order1234,
                                           self.order1235,
                                           self.machine,
                                           self.test2,
-                                          self.a_field)
+                                          self.a_field.id)

         # Check a regression matches if all fields match.
-        ret, _ = identify_related_changes(ts_db, field_change5)
+        ret, _ = identify_related_changes(session, ts_db, field_change5)
         self.assertTrue(ret, "Should Match with different tests.")

         field_change6 = ts_db.FieldChange(self.order1234,
                                           self.order1235,
                                           self.machine,
                                           self.test,
-                                          self.a_field2)
+                                          self.a_field2.id)

         # Check a regression matches if all fields match.
-        ret, _ = identify_related_changes(ts_db, field_change6)
+        ret, _ = identify_related_changes(session, ts_db, field_change6)
         self.assertTrue(ret, "Should Match with different fields.")

-        ts_db.commit()
+        session.commit()

-        r2 = rebuild_title(ts_db, self.regression)
+        r2 = rebuild_title(session, ts_db, self.regression)
         expected_title = "Regression of 6 benchmarks: foo, bar"
         self.assertEquals(r2.title, expected_title)

     def test_regression_evolution(self):
+        session = self.session
         ts_db = self.ts_db
-        rule_update_fixed_regressions.regression_evolution(ts_db, self.regressions)
+        rule_update_fixed_regressions.regression_evolution(
+            session, ts_db, self.regressions)

     def test_fc_deletion(self):
-        delete_fieldchange(self.ts_db, self.field_change)
-        delete_fieldchange(self.ts_db, self.field_change2)
-        delete_fieldchange(self.ts_db, self.field_change3)
+        session = self.session
+        ts_db = self.ts_db
+        delete_fieldchange(session, ts_db, self.field_change)
+        delete_fieldchange(session, ts_db, self.field_change2)
+        delete_fieldchange(session, ts_db, self.field_change3)

     def test_run_deletion(self):
         """Do the FC and RIs get cleaned up when runs are deleted?"""
+        session = self.session
         ts_db = self.ts_db
-        run_idsq = ts_db.query(ts_db.Run.id).all()
-        fc_ids = ts_db.query(ts_db.FieldChange.id).all()
-        ri_ids = ts_db.query(ts_db.RegressionIndicator.id).all()
+        run_idsq = session.query(ts_db.Run.id).all()
+        fc_ids = session.query(ts_db.FieldChange.id).all()
+        ri_ids = session.query(ts_db.RegressionIndicator.id).all()

         run_ids = [row[0] for row in run_idsq]
-        runs = ts_db.query(ts_db.Run).filter(ts_db.Run.id.in_(run_ids)).all()
+        runs = session.query(ts_db.Run).filter(ts_db.Run.id.in_(run_ids)).all()
         for run in runs:
-            ts_db.delete(run)
+            session.delete(run)

-        run_ids_new = ts_db.query(ts_db.Run.id).all()
-        fc_ids_new = ts_db.query(ts_db.FieldChange.id).all()
-        ri_ids_new = ts_db.query(ts_db.RegressionIndicator.id).all()
+        run_ids_new = session.query(ts_db.Run.id).all()
+        fc_ids_new = session.query(ts_db.FieldChange.id).all()
+        ri_ids_new = session.query(ts_db.RegressionIndicator.id).all()

         # Make sure there were some runs.
         self.assertNotEqual(len(run_idsq), 0)
         self.assertNotEqual(len(fc_ids), 0)