Index: tests/server/reporting/analysis.py
===================================================================
--- tests/server/reporting/analysis.py
+++ tests/server/reporting/analysis.py
@@ -118,20 +118,20 @@
             min, False, False, curr_samples, prev, None, None)
         self.assertFalse(uninteresting.is_result_interesting())
-        self.assertEquals(uninteresting.get_test_status(), UNCHANGED_PASS)
-        self.assertEquals(uninteresting.get_value_status(), UNCHANGED_PASS)
+        self.assertEqual(uninteresting.get_test_status(), UNCHANGED_PASS)
+        self.assertEqual(uninteresting.get_value_status(), UNCHANGED_PASS)
 
     def test_slower(self):
         """Test getting a simple regression."""
         slower = ComparisonResult(min, False, False, [10.], [5.], None, None)
-        self.assertEquals(slower.get_value_status(), REGRESSED)
+        self.assertEqual(slower.get_value_status(), REGRESSED)
         self.assertTrue(slower.is_result_interesting())
 
     def test_faster(self):
         """Test getting a simple improvement."""
         faster = ComparisonResult(min, False, False, [5.], [10.], None, None)
-        self.assertEquals(faster.get_value_status(), IMPROVED)
+        self.assertEqual(faster.get_value_status(), IMPROVED)
         self.assertTrue(faster.is_result_interesting())
 
     def test_really_faster(self):
@@ -139,49 +139,49 @@
         faster = ComparisonResult(
             min, False, False, [5., 6.], [10., 10., 10.], None, None)
-        self.assertEquals(faster.get_value_status(), IMPROVED)
+        self.assertEqual(faster.get_value_status(), IMPROVED)
         self.assertTrue(faster.is_result_interesting())
 
     def test_improved_status(self):
         """Test getting a test status improvement."""
         improved = ComparisonResult(min, False, True, [1.], None, None, None)
-        self.assertEquals(improved.get_test_status(), IMPROVED)
+        self.assertEqual(improved.get_test_status(), IMPROVED)
 
     def test_regressed_status(self):
         """Test getting a test status improvement."""
         improved = ComparisonResult(min, True, False, None, [10.], None, None)
-        self.assertEquals(improved.get_test_status(), REGRESSED)
+        self.assertEqual(improved.get_test_status(), REGRESSED)
 
     def test_keep_on_failing_status(self):
         """Test getting a repeated fail."""
         improved = ComparisonResult(min, True, True, None, None, None, None)
-        self.assertEquals(improved.get_test_status(), UNCHANGED_FAIL)
+        self.assertEqual(improved.get_test_status(), UNCHANGED_FAIL)
 
     def test_noticeable_regression(self):
         """Test a big looking jump."""
         regressed = ComparisonResult(
             min, False, False, [10.0, 10.1], [5.0, 5.1, 4.9, 5.0], None, None)
-        self.assertEquals(regressed.get_value_status(), REGRESSED)
+        self.assertEqual(regressed.get_value_status(), REGRESSED)
 
     def test_no_regression_flat_line(self):
         """This is a flat line, it should have no changes."""
         flat = ComparisonResult(
             min, False, False, [1.0], FLAT_LINE[0:10], None, None)
-        self.assertEquals(flat.get_value_status(), UNCHANGED_PASS)
+        self.assertEqual(flat.get_value_status(), UNCHANGED_PASS)
 
     def test_no_regression_flat_line_noise(self):
         """Now 4% noise."""
         flat = ComparisonResult(
             min, False, False, [1.020], FLAT_NOISE[0:10], None, None)
         ret = flat.get_value_status()
-        self.assertEquals(ret, UNCHANGED_PASS)
+        self.assertEqual(ret, UNCHANGED_PASS)
 
     def test_big_no_regression_flat_line_noise(self):
         """Same data, but bigger 10 + 5% variation."""
         flat = ComparisonResult(
             min, False, False, [10.25], FLAT_NOISE2[0:10], None, None)
         ret = flat.get_value_status()
-        self.assertEquals(ret, UNCHANGED_PASS)
+        self.assertEqual(ret, UNCHANGED_PASS)
 
     def test_big_no_regression_flat_line_multi(self):
         """Same data, but bigger 10 + 5% variation, multisample current."""
@@ -189,135 +189,135 @@
             min, False, False, [10.0606, 10.4169, 10.1859],
             BIG_NUMBERS_FLAT[0:10], None, None)
         ret = flat.get_value_status()
-        self.assertEquals(ret, UNCHANGED_PASS)
+        self.assertEqual(ret, UNCHANGED_PASS)
 
     def test_simple_regression(self):
         """Flat line that jumps to another flat line."""
         flat = ComparisonResult(
             min, False, False, [SIMPLE_REGRESSION[10]], SIMPLE_REGRESSION[0:9],
             None, None)
-        self.assertEquals(flat.get_value_status(), REGRESSED)
+        self.assertEqual(flat.get_value_status(), REGRESSED)
 
     def test_noisy_regression_5(self):
         """A regression in 5% noise."""
         flat = ComparisonResult(min, False, False, [12.2821], REGRESS_5[0:9],
                                 None, None)
-        self.assertEquals(flat.get_value_status(), REGRESSED)
+        self.assertEqual(flat.get_value_status(), REGRESSED)
 
     def test_noisy_regression_5_multi(self):
         """A regression in 5% noise, more current samples."""
         flat = ComparisonResult(min, False, False,
                                 [12.2821, 12.2141, 12.3077], MS_5_REG[0:9],
                                 None, None)
         ret = flat.get_value_status()
-        self.assertEquals(ret, REGRESSED)
+        self.assertEqual(ret, REGRESSED)
 
     def test_simple_improvement(self):
         """An improvement without noise."""
         flat = ComparisonResult(min, False, False, [IMP[10]], IMP[0:9],
                                 None, None)
-        self.assertEquals(flat.get_value_status(), IMPROVED)
+        self.assertEqual(flat.get_value_status(), IMPROVED)
 
     def test_noise_improvement(self):
         """An improvement with 5% noise."""
         flat = ComparisonResult(min, False, False, [IMP_NOISE[10]],
                                 IMP_NOISE[0:9], None, None)
-        self.assertEquals(flat.get_value_status(), IMPROVED)
+        self.assertEqual(flat.get_value_status(), IMPROVED)
 
     def test_bimodal(self):
         """A bimodal line, with no regressions."""
         bimodal = ComparisonResult(min, False, False, [BIMODAL[10]],
                                    BIMODAL[0:9], None, None)
-        self.assertEquals(bimodal.get_value_status(), UNCHANGED_PASS)
+        self.assertEqual(bimodal.get_value_status(), UNCHANGED_PASS)
 
     def test_noise_bimodal(self):
         """Bimodal line with 5% noise."""
         bimodal = ComparisonResult(min, False, False, [BIMODAL_NOISE[10]],
                                    BIMODAL_NOISE[0:9], None, None)
-        self.assertEquals(bimodal.get_value_status(), UNCHANGED_PASS)
+        self.assertEqual(bimodal.get_value_status(), UNCHANGED_PASS)
 
     def test_bimodal_alternating(self):
         """Bimodal which sticks in a mode for a while."""
         bimodal = ComparisonResult(min, False, False, [BM_ALTERNATE[10]],
                                    BM_ALTERNATE[0:9], None, None)
-        self.assertEquals(bimodal.get_value_status(), UNCHANGED_PASS)
+        self.assertEqual(bimodal.get_value_status(), UNCHANGED_PASS)
 
     def test_noise_bimodal_alternating(self):
         """Bimodal alternating with 5% noise."""
         bimodal = ComparisonResult(min, False, False, [BM_AL_NOISE[10]],
                                    BM_AL_NOISE[0:9], None, None)
-        self.assertEquals(bimodal.get_value_status(), UNCHANGED_PASS)
+        self.assertEqual(bimodal.get_value_status(), UNCHANGED_PASS)
 
     def test_bimodal_alternating_regression(self):
         """Bimodal alternating regression."""
         bimodal = ComparisonResult(min, False, False, [BM_AL_REG[11]],
                                    BM_AL_REG[0:10], None, None)
-        self.assertEquals(bimodal.get_value_status(), REGRESSED)
+        self.assertEqual(bimodal.get_value_status(), REGRESSED)
 
     def test_bimodal_regression(self):
         """A regression in a bimodal line."""
         bimodal = ComparisonResult(min, False, False, [BM_REGRESSION[12]],
                                    BM_REGRESSION[0:11], None, None)
-        self.assertEquals(bimodal.get_value_status(), REGRESSED)
+        self.assertEqual(bimodal.get_value_status(), REGRESSED)
 
     def test_noise_bimodal_regression(self):
         bimodal = ComparisonResult(
             min, False, False, [BM_REGS_NOISE[12]], BM_REGS_NOISE[0:11],
             None, None)
-        self.assertEquals(bimodal.get_value_status(), REGRESSED)
+        self.assertEqual(bimodal.get_value_status(), REGRESSED)
 
     def test_bimodal_overlapping_regression(self):
         bimodal = ComparisonResult(min, False, False, [BM_REG_OVERLAP[12]],
                                    BM_REG_OVERLAP[0:11], None, None)
-        self.assertEquals(bimodal.get_value_status(), REGRESSED)
+        self.assertEqual(bimodal.get_value_status(), REGRESSED)
 
     def test_noise_bimodal_overlapping_regression(self):
         bimodal = ComparisonResult(
             min, False, False, [BM_REG_OVER_NOISE[12]],
             BM_REG_OVER_NOISE[0:11], None, None)
-        self.assertEquals(bimodal.get_value_status(), REGRESSED)
+        self.assertEqual(bimodal.get_value_status(), REGRESSED)
 
     def test_single_spike(self):
         spike = ComparisonResult(
             min, False, False, [SPIKE[11]], SPIKE[0:10], None, None)
         # Fixme
-        # self.assertEquals(spike.get_value_status(), UNCHANGED_PASS)
+        # self.assertEqual(spike.get_value_status(), UNCHANGED_PASS)
 
     def test_noise_single_spike(self):
         spike = ComparisonResult(
             min, False, False, [NOISE_SPIKE[8]], NOISE_SPIKE[0:7], None, None)
         # Fixme
-        # self.assertEquals(spike.get_value_status(), UNCHANGED_PASS)
+        # self.assertEqual(spike.get_value_status(), UNCHANGED_PASS)
 
     def test_slow_regression(self):
         slow = ComparisonResult(
             min, False, False, [SLOW_REG[12]], SLOW_REG[0:11], None, None)
-        self.assertEquals(slow.get_value_status(), REGRESSED)
+        self.assertEqual(slow.get_value_status(), REGRESSED)
 
     def test_noise_slow_regression(self):
         slow = ComparisonResult(
             min, False, False, [SLOW_REG_NOISE[12]], SLOW_REG_NOISE[0:11],
             None, None)
-        self.assertEquals(slow.get_value_status(), REGRESSED)
+        self.assertEqual(slow.get_value_status(), REGRESSED)
 
     def test_slow_improvement(self):
         slow = ComparisonResult(
             min, False, False, [SLOW_IMP[12]], SLOW_IMP[0:11], None, None)
         # Fixme
-        # self.assertEquals(slow.get_value_status(), IMPROVED)
+        # self.assertEqual(slow.get_value_status(), IMPROVED)
 
     def test_noise_slow_improvement(self):
         slow = ComparisonResult(
             min, False, False, [SLOW_IMP_NOISE[12]], SLOW_IMP_NOISE[0:11],
             None, None)
         # Fixme
-        # self.assertEquals(slow.get_value_status(), IMPROVED)
+        # self.assertEqual(slow.get_value_status(), IMPROVED)
 
     def test_handle_zero_sample(self):
         for agfn in (min, median):
             zeroSample = ComparisonResult(
                 agfn, False, False, [0.005, 0.0047, 0.0048], [0.0, 0.01, 0.01],
                 None, None)
-            self.assertEquals(zeroSample.get_value_status(), UNCHANGED_PASS)
+            self.assertEqual(zeroSample.get_value_status(), UNCHANGED_PASS)
 
 
 class AbsMinTester(unittest.TestCase):
Index: tests/server/ui/change_processing.py
===================================================================
--- tests/server/ui/change_processing.py
+++ tests/server/ui/change_processing.py
@@ -213,7 +213,7 @@
 
         r2 = rebuild_title(session, ts_db, self.regression)
         expected_title = "Regression of 6 benchmarks: foo, bar"
-        self.assertEquals(r2.title, expected_title)
+        self.assertEqual(r2.title, expected_title)
 
     def test_regression_evolution(self):
         session = self.session
Index: tests/server/ui/test_api.py
===================================================================
--- tests/server/ui/test_api.py
+++ tests/server/ui/test_api.py
@@ -139,12 +139,12 @@
         # All machines returns the list of machines with parameters, but no runs.
         j = check_json(client, 'api/db_default/v4/nts/machines/')
         self._check_response_is_well_formed(j)
-        self.assertEquals(j['machines'], machines_expected_response)
+        self.assertEqual(j['machines'], machines_expected_response)
         self.assertIsNone(j.get('runs'))
 
         j = check_json(client, 'api/db_default/v4/nts/machines')
         self._check_response_is_well_formed(j)
-        self.assertEquals(j['machines'], machines_expected_response)
+        self.assertEqual(j['machines'], machines_expected_response)
         self.assertIsNone(j.get('runs'))
 
         # Machine + properties + run information.
@@ -183,7 +183,7 @@
         """ Check /orders/n returns the expected order information."""
         client = self.client
         j = check_json(client, 'api/db_default/v4/nts/orders/1')
-        self.assertEquals(j['orders'][0], order_expected_response)
+        self.assertEqual(j['orders'][0], order_expected_response)
         self._check_response_is_well_formed(j)
         check_json(client, 'api/db_default/v4/nts/orders/100', expected_code=404)
@@ -192,7 +192,7 @@
         client = self.client
         j = check_json(client, 'api/db_default/v4/nts/samples/1')
         self._check_response_is_well_formed(j)
-        self.assertEquals(sample_expected_response, j['samples'][0])
+        self.assertEqual(sample_expected_response, j['samples'][0])
         check_json(client, 'api/db_default/v4/nts/samples/1000', expected_code=404)
 
     def test_graph_api(self):
Index: tests/server/ui/test_api_modify.py
===================================================================
--- tests/server/ui/test_api_modify.py
+++ tests/server/ui/test_api_modify.py
@@ -65,8 +65,8 @@
         machine_after = check_json(client, 'api/db_default/v4/nts/machines/1')
         machine_after = machine_after['machine']
         for key in ('hardware', 'os', 'hostname', 'new_parameter', 'uname'):
-            self.assertEquals(machine_after.get(key, None),
-                              data['machine'].get(key, None))
+            self.assertEqual(machine_after.get(key, None),
+                             data['machine'].get(key, None))
 
     def test_00_rename_machine(self):
         """Check rename POST request to /machines/n"""