Remove additional //testing pylint disables
Removes the following disables from the //testing pylintrc file (an
illustrative sketch of the affected patterns follows the list):
* anomalous-backslash-in-string
* consider-using-enumerate
* deprecated-method
* deprecated-module
* function-redefined
* global-statement
* logging-not-lazy
* reimported
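As a quick reference, this sketch (hypothetical code, not taken from any
file in this CL) shows the kind of patterns the re-enabled checks flag
and the shape of the fixes applied below:

  import logging
  import re

  items = ['foo.bar', 'foo_baz']

  # Before (flagged): a non-raw string containing an escape
  # (anomalous-backslash-in-string), eager %-formatting in a log call
  # (logging-not-lazy), and manual indexing via range(len(...))
  # (consider-using-enumerate):
  #   re.search('foo\.bar', items[0])
  #   logging.info('checking %s items' % len(items))
  #   for i in range(len(items)):
  #     logging.info('item %d is %s', i, items[i])

  # After (clean under the re-enabled checks):
  re.search(r'foo\.bar', items[0])
  logging.info('checking %s items', len(items))
  for i, item in enumerate(items):
    logging.info('item %d is %s', i, item)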
Bug: 353942917
Change-Id: I09c2508a9418e445b458f838db3cee0d2c77d7d0
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/5777806
Commit-Queue: Ben Pastene <[email protected]>
Reviewed-by: Ben Pastene <[email protected]>
Auto-Submit: Brian Sheedy <[email protected]>
Cr-Commit-Position: refs/heads/main@{#1340582}
diff --git a/testing/buildbot/generate_buildbot_json_unittest.py b/testing/buildbot/generate_buildbot_json_unittest.py
index aceca7f..7429bca 100755
--- a/testing/buildbot/generate_buildbot_json_unittest.py
+++ b/testing/buildbot/generate_buildbot_json_unittest.py
@@ -2166,8 +2166,8 @@
self.assertRegex(
joined_lines, 'File chromium.test.json did not have the following'
' expected contents:.*')
- self.assertRegex(joined_lines, '.*--- expected.*')
- self.assertRegex(joined_lines, '.*\+\+\+ current.*')
+ self.assertRegex(joined_lines, r'.*--- expected.*')
+ self.assertRegex(joined_lines, r'.*\+\+\+ current.*')
fbb.printed_lines = []
self.assertFalse(fbb.printed_lines)
@@ -2268,8 +2268,8 @@
):
fbb.check_input_file_consistency(verbose=True)
joined_lines = '\n'.join(fbb.printed_lines)
- self.assertRegex(joined_lines, '.*\+ chromium\..*test.*')
- self.assertRegex(joined_lines, '.*\- chromium\..*test.*')
+ self.assertRegex(joined_lines, r'.*\+ chromium\..*test.*')
+ self.assertRegex(joined_lines, r'.*\- chromium\..*test.*')
fbb.printed_lines = []
self.assertFalse(fbb.printed_lines)
@@ -2282,8 +2282,8 @@
):
fbb.check_input_file_consistency(verbose=True)
joined_lines = ' '.join(fbb.printed_lines)
- self.assertRegex(joined_lines, '.*\+.*Fake Tester.*')
- self.assertRegex(joined_lines, '.*\-.*Fake Tester.*')
+ self.assertRegex(joined_lines, r'.*\+.*Fake Tester.*')
+ self.assertRegex(joined_lines, r'.*\-.*Fake Tester.*')
fbb.printed_lines = []
self.assertFalse(fbb.printed_lines)
@@ -2304,7 +2304,7 @@
with self.assertRaises(generate_buildbot_json.BBGenErr):
fbb.check_input_file_consistency(verbose=True)
joined_lines = ' '.join(fbb.printed_lines)
- self.assertRegex(joined_lines, '.*\- Fake Tester.*')
+ self.assertRegex(joined_lines, r'.*\- Fake Tester.*')
fbb.printed_lines = []
self.assertFalse(fbb.printed_lines)
@@ -2325,8 +2325,8 @@
with self.assertRaises(generate_buildbot_json.BBGenErr):
fbb.check_input_file_consistency(verbose=True)
joined_lines = ' '.join(fbb.printed_lines)
- self.assertRegex(joined_lines, '.*\+ Fake Tester.*')
- self.assertRegex(joined_lines, '.*\- Fake Tester.*')
+ self.assertRegex(joined_lines, r'.*\+ Fake Tester.*')
+ self.assertRegex(joined_lines, r'.*\- Fake Tester.*')
fbb.printed_lines = []
self.assertFalse(fbb.printed_lines)
@@ -2347,8 +2347,8 @@
with self.assertRaises(generate_buildbot_json.BBGenErr):
fbb.check_input_file_consistency(verbose=True)
joined_lines = ' '.join(fbb.printed_lines)
- self.assertRegex(joined_lines, '.*\+ suite_.*')
- self.assertRegex(joined_lines, '.*\- suite_.*')
+ self.assertRegex(joined_lines, r'.*\+ suite_.*')
+ self.assertRegex(joined_lines, r'.*\- suite_.*')
fbb.printed_lines = []
self.assertFalse(fbb.printed_lines)
@@ -2368,8 +2368,8 @@
with self.assertRaises(generate_buildbot_json.BBGenErr):
fbb.check_input_file_consistency(verbose=True)
joined_lines = ' '.join(fbb.printed_lines)
- self.assertRegex(joined_lines, '.*\+ suite_.*')
- self.assertRegex(joined_lines, '.*\- suite_.*')
+ self.assertRegex(joined_lines, r'.*\+ suite_.*')
+ self.assertRegex(joined_lines, r'.*\- suite_.*')
fbb.printed_lines = []
self.assertFalse(fbb.printed_lines)
@@ -2742,8 +2742,8 @@
with self.assertRaises(generate_buildbot_json.BBGenErr):
fbb.check_input_file_consistency(verbose=True)
joined_lines = '\n'.join(fbb.printed_lines)
- self.assertRegex(joined_lines, '.*\+ ._mixin.*')
- self.assertRegex(joined_lines, '.*\- ._mixin.*')
+ self.assertRegex(joined_lines, r'.*\+ ._mixin.*')
+ self.assertRegex(joined_lines, r'.*\- ._mixin.*')
fbb.printed_lines = []
self.assertFalse(fbb.printed_lines)
@@ -2878,7 +2878,7 @@
):
fbb.check_input_file_consistency(verbose=True)
joined_lines = '\n'.join(fbb.printed_lines)
- self.assertRegex(joined_lines, '.*\- builder_mixin')
+ self.assertRegex(joined_lines, r'.*\- builder_mixin')
fbb.printed_lines = []
self.assertFalse(fbb.printed_lines)
@@ -2892,8 +2892,8 @@
):
fbb.check_input_file_consistency(verbose=True)
joined_lines = '\n'.join(fbb.printed_lines)
- self.assertRegex(joined_lines, '.*\- a_test')
- self.assertRegex(joined_lines, '.*\+ a_test')
+ self.assertRegex(joined_lines, r'.*\- a_test')
+ self.assertRegex(joined_lines, r'.*\+ a_test')
fbb.printed_lines = []
self.assertFalse(fbb.printed_lines)
@@ -2902,7 +2902,8 @@
LUCI_MILO_CFG)
with self.assertRaisesRegex(
generate_buildbot_json.BBGenErr,
- f'Invalid \.pyl file \'{re.escape(self.args.test_suites_pyl_path)}\'.*',
+ f'Invalid \\.pyl file '
+ f"'{re.escape(self.args.test_suites_pyl_path)}'.*",
):
fbb.check_input_file_consistency(verbose=True)
self.assertEqual(fbb.printed_lines, [
diff --git a/testing/chromoting/browser_tests_launcher.py b/testing/chromoting/browser_tests_launcher.py
index 9e88b3b..b0445f5 100644
--- a/testing/chromoting/browser_tests_launcher.py
+++ b/testing/chromoting/browser_tests_launcher.py
@@ -3,8 +3,6 @@
# found in the LICENSE file.
"""Utility script to launch browser-tests on the Chromoting bot."""
-from __future__ import print_function
-
import argparse
import time
@@ -20,8 +18,6 @@
from chromoting_test_utilities import TestMachineCleanup
SUCCESS_INDICATOR = 'SUCCESS: all tests passed.'
-TEST_FAILURE = False
-FAILING_TESTS = ''
BROWSER_NOT_STARTED_ERROR = (
'Still waiting for the following processes to finish')
TIME_OUT_INDICATOR = '(TIMED OUT)'
@@ -40,7 +36,6 @@
host_log_file_names: Array of host logs created for this command, including
retries.
"""
- global TEST_FAILURE, FAILING_TESTS
host_log_file_names = []
retries = 0
@@ -110,35 +105,41 @@
retries += 1
# Check that the test passed.
+ test_failure = False
+ failing_tests = ''
if SUCCESS_INDICATOR not in results:
- TEST_FAILURE = True
+ test_failure = True
# Add this command-line to list of tests that failed.
- FAILING_TESTS += command
+ failing_tests = command
- return host_log_file_names
+ return host_log_file_names, test_failure, failing_tests
-def main(args):
+def run_tests(args):
InitialiseTestMachineForLinux(args.cfg_file)
host_log_files = []
+ have_test_failure = False
+ all_failing_tests = ''
with open(args.commands_file) as f:
for line in f:
# Replace the PROD_DIR value in the command-line with
# the passed in value.
line = line.replace(PROD_DIR_ID, args.prod_dir)
# Launch specified command line for test.
- host_log_files.extend(LaunchBTCommand(args, line))
+ log_files, test_failure, failing_tests = LaunchBTCommand(args, line)
+ host_log_files.extend(log_files)
+ have_test_failure = have_test_failure or test_failure
+ all_failing_tests += failing_tests
# All tests completed. Include host-logs in the test results.
PrintHostLogContents(host_log_files)
- return host_log_files
+ return host_log_files, have_test_failure, all_failing_tests
-if __name__ == '__main__':
-
+def main():
parser = argparse.ArgumentParser()
parser.add_argument('-f',
'--commands_file',
@@ -158,12 +159,16 @@
command_line_args = parser.parse_args()
host_logs = ''
try:
- host_logs = main(command_line_args)
- if TEST_FAILURE:
+ host_logs, had_test_failure, failing_tests = run_tests(command_line_args)
+ if had_test_failure:
print('++++++++++AT LEAST 1 TEST FAILED++++++++++')
- print(FAILING_TESTS.rstrip('\n'))
+ print(failing_tests.rstrip('\n'))
print('++++++++++++++++++++++++++++++++++++++++++')
raise Exception('At least one test failed.')
finally:
# Stop host and cleanup user-profile-dir.
TestMachineCleanup(command_line_args.user_profile_dir, host_logs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/testing/flake_suppressor_common/tag_utils.py b/testing/flake_suppressor_common/tag_utils.py
index c00b1d6..74e69b9 100644
--- a/testing/flake_suppressor_common/tag_utils.py
+++ b/testing/flake_suppressor_common/tag_utils.py
@@ -10,8 +10,10 @@
TagUtils = None
+# TODO(crbug.com/358591565): Refactor this to remove the need for global
+# statements.
def SetTagUtilsImplementation(impl: Type['BaseTagUtils']) -> None:
- global TagUtils
+ global TagUtils # pylint: disable=global-statement
assert issubclass(impl, BaseTagUtils)
TagUtils = impl()
diff --git a/testing/libfuzzer/fuzzers/generate_v8_inspector_fuzzer_corpus.py b/testing/libfuzzer/fuzzers/generate_v8_inspector_fuzzer_corpus.py
index 9d7b1a3..233f8c80 100755
--- a/testing/libfuzzer/fuzzers/generate_v8_inspector_fuzzer_corpus.py
+++ b/testing/libfuzzer/fuzzers/generate_v8_inspector_fuzzer_corpus.py
@@ -9,28 +9,27 @@
import sys
load_regexp = re.compile(r'^\s*utils\.load\([\'"]([^\'"]+)[\'"]\);\s*$')
-load_root = None
-def resolve_loads(output_file, input_lines, loaded_files):
+def resolve_loads(output_file, input_lines, loaded_files, load_root):
for line in input_lines:
load_match = load_regexp.match(line)
if not load_match:
output_file.write(line)
continue
- load_file(output_file, load_match.group(1), loaded_files)
+ load_file(output_file, load_match.group(1), loaded_files, load_root)
-def load_file(output_file, input_file, loaded_files):
+def load_file(output_file, input_file, loaded_files, load_root):
if input_file in loaded_files:
sys.exit('Recursive load of \'{}\''.format(input_file))
loaded_files.add(input_file)
output_file.write('\n// Loaded from \'{}\':\n'.format(input_file))
with open(os.path.join(load_root, input_file)) as file:
- resolve_loads(output_file, file.readlines(), loaded_files)
+ resolve_loads(output_file, file.readlines(), loaded_files, load_root)
-def generate_content(output_file, input_file):
+def generate_content(output_file, input_file, load_root):
# The fuzzer does not provide the same methods on 'utils' as the
# inspector-test executable. Thus mock out non-existing ones via a proxy.
output_file.write("""
@@ -45,10 +44,10 @@
# Always prepend the 'protocol-test.js' file, which is always loaded first
# by the test runner for inspector tests.
protocol_test_file = os.path.join('test', 'inspector', 'protocol-test.js')
- load_file(output_file, protocol_test_file, set())
+ load_file(output_file, protocol_test_file, set(), load_root)
# Then load the actual input file, inlining all recursively loaded files.
- load_file(output_file, input_file, set())
+ load_file(output_file, input_file, set(), load_root)
def main():
@@ -67,7 +66,6 @@
# Loaded files are relative to the v8 root, which is two levels above the
# inspector test directory.
- global load_root
load_root = os.path.dirname(os.path.dirname(os.path.normpath(input_root)))
for parent, _, files in os.walk(input_root):
@@ -80,7 +78,7 @@
with open(output_file, 'w') as output_file:
abs_input_file = os.path.join(parent, filename)
rel_input_file = os.path.relpath(abs_input_file, load_root)
- generate_content(output_file, rel_input_file)
+ generate_content(output_file, rel_input_file, load_root)
# Done.
sys.exit(0)
diff --git a/testing/merge_scripts/code_coverage/merge_js_lib.py b/testing/merge_scripts/code_coverage/merge_js_lib.py
index 7cdc6b2..73965807 100644
--- a/testing/merge_scripts/code_coverage/merge_js_lib.py
+++ b/testing/merge_scripts/code_coverage/merge_js_lib.py
@@ -241,8 +241,8 @@
del coverage_json[key]
remapped_paths += 1
- logging.info('Remapped %s paths' % (remapped_paths))
- logging.info('Excluded %s paths' % (excluded_paths))
+ logging.info('Remapped %s paths', remapped_paths)
+ logging.info('Excluded %s paths', excluded_paths)
# Overwrite the current coverage file with new contents.
f.seek(0)
diff --git a/testing/merge_scripts/code_coverage/merge_js_results.py b/testing/merge_scripts/code_coverage/merge_js_results.py
index 57210b7..4562d867 100755
--- a/testing/merge_scripts/code_coverage/merge_js_results.py
+++ b/testing/merge_scripts/code_coverage/merge_js_results.py
@@ -65,9 +65,8 @@
logging.info('Excluding uninteresting lines from coverage')
javascript_merger.exclude_uninteresting_lines(coverage_file_path)
- logging.info(
- 'Remapping all paths relative to the src dir and removing any ' +
- 'files in the out dir')
+ logging.info('Remapping all paths relative to the src dir and removing any '
+ 'files in the out dir')
javascript_merger.remap_paths_to_relative(coverage_file_path,
params.chromium_src_dir,
params.build_dir)
diff --git a/testing/merge_scripts/code_coverage/merge_lib.py b/testing/merge_scripts/code_coverage/merge_lib.py
index 0d1237f..0234ccd 100644
--- a/testing/merge_scripts/code_coverage/merge_lib.py
+++ b/testing/merge_scripts/code_coverage/merge_lib.py
@@ -76,12 +76,12 @@
check=True)
logging.info(p.stdout)
except subprocess.CalledProcessError as error:
- logging.info('stdout: %s' % error.output)
- logging.error('Failed to merge profiles, return code (%d), error: %r' %
- (error.returncode, error.stderr))
+ logging.info('stdout: %s', error.output)
+ logging.error('Failed to merge profiles, return code (%d), error: %r',
+ error.returncode, error.stderr)
raise error
except subprocess.TimeoutExpired as e:
- logging.info('stdout: %s' % e.output)
+ logging.info('stdout: %s', e.output)
raise e
logging.info('Profile data is created as: "%r".', profile_output_file_path)
diff --git a/testing/merge_scripts/code_coverage/merge_results.py b/testing/merge_scripts/code_coverage/merge_results.py
index 4890e5a..3ce9118b 100755
--- a/testing/merge_scripts/code_coverage/merge_results.py
+++ b/testing/merge_scripts/code_coverage/merge_results.py
@@ -122,7 +122,7 @@
rc = subprocess.call(args)
if rc != 0:
failed = True
- logging.warning('%s exited with %s' % (merge_js_results_script, rc))
+ logging.warning('%s exited with %s', merge_js_results_script, rc)
# Name the output profdata file as {test_target}.profdata or
# default.profdata.
@@ -177,8 +177,8 @@
rc = subprocess.call(args)
if rc != 0:
failed = True
- logging.warning('Additional merge script %s exited with %s' %
- (params.additional_merge_script, rc))
+ logging.warning('Additional merge script %s exited with %s',
+ params.additional_merge_script, rc)
elif len(params.jsons_to_merge) == 1:
logging.info("Only one output needs to be merged; directly copying it.")
with open(params.jsons_to_merge[0]) as f_read:
diff --git a/testing/merge_scripts/code_coverage/merge_results_test.py b/testing/merge_scripts/code_coverage/merge_results_test.py
index 72b05a6..af4545b 100755
--- a/testing/merge_scripts/code_coverage/merge_results_test.py
+++ b/testing/merge_scripts/code_coverage/merge_results_test.py
@@ -253,7 +253,7 @@
with mock.patch.object(os, 'remove'):
mock_walk.return_value = mock_input_dir_walk
with mock.patch.object(subprocess, 'run') as mock_exec_cmd:
- input_profdata_filename_pattern = '.+_unittests\.profdata'
+ input_profdata_filename_pattern = r'.+_unittests\.profdata'
merger.merge_profiles('/b/some/path', 'output/dir/default.profdata',
'.profdata', 'llvm-profdata',
input_profdata_filename_pattern)
diff --git a/testing/merge_scripts/code_coverage/merge_steps.py b/testing/merge_scripts/code_coverage/merge_steps.py
index 1e86b747..a23f79c 100755
--- a/testing/merge_scripts/code_coverage/merge_steps.py
+++ b/testing/merge_scripts/code_coverage/merge_steps.py
@@ -52,7 +52,7 @@
sparse=params.sparse,
merge_timeout=params.profile_merge_timeout)
if invalid_profiles:
- logging.error('Invalid profiles were generated:\n%r' % invalid_profiles)
+ logging.error('Invalid profiles were generated:\n%r', invalid_profiles)
return 1
return 0
diff --git a/testing/merge_scripts/standard_gtest_merge_test.py b/testing/merge_scripts/standard_gtest_merge_test.py
index 05ea01f..3f1bd5f 100755
--- a/testing/merge_scripts/standard_gtest_merge_test.py
+++ b/testing/merge_scripts/standard_gtest_merge_test.py
@@ -447,7 +447,7 @@
unicode_expectations = convert_to_unicode(expectation)
unicode_result = convert_to_unicode(result)
- self.assertEquals(unicode_expectations, unicode_result)
+ self.assertEqual(unicode_expectations, unicode_result)
def test_ok(self):
# Two shards, both successfully finished.
diff --git a/testing/merge_scripts/standard_isolated_script_merge_test.py b/testing/merge_scripts/standard_isolated_script_merge_test.py
index dd9ba3b..9bdb4c3 100755
--- a/testing/merge_scripts/standard_isolated_script_merge_test.py
+++ b/testing/merge_scripts/standard_isolated_script_merge_test.py
@@ -78,8 +78,8 @@
with open(output_json_file, 'r') as f:
results = json.load(f)
- self.assertEquals(results['successes'], ['fizz', 'baz', 'buzz', 'bar'])
- self.assertEquals(results['failures'], ['failing_test_one'])
+ self.assertEqual(results['successes'], ['fizz', 'baz', 'buzz', 'bar'])
+ self.assertEqual(results['failures'], ['failing_test_one'])
self.assertTrue(results['valid'])
def test_missing_shard(self):
@@ -94,11 +94,11 @@
with open(output_json_file, 'r') as f:
results = json.load(f)
- self.assertEquals(results['successes'], ['fizz', 'baz'])
- self.assertEquals(results['failures'], [])
+ self.assertEqual(results['successes'], ['fizz', 'baz'])
+ self.assertEqual(results['failures'], [])
self.assertTrue(results['valid'])
- self.assertEquals(results['global_tags'], ['UNRELIABLE_RESULTS'])
- self.assertEquals(results['missing_shards'], [1])
+ self.assertEqual(results['global_tags'], ['UNRELIABLE_RESULTS'])
+ self.assertEqual(results['missing_shards'], [1])
class InputParsingTest(StandardIsolatedScriptMergeTest):
@@ -142,8 +142,8 @@
exit_code = standard_isolated_script_merge.StandardIsolatedScriptMerge(
output_json_file, self.summary, self.test_files)
- self.assertEquals(0, exit_code)
- self.assertEquals([
+ self.assertEqual(0, exit_code)
+ self.assertEqual([
[{
'result0': [
'bar',
@@ -166,8 +166,8 @@
exit_code = standard_isolated_script_merge.StandardIsolatedScriptMerge(
output_json_file, self.summary, json_files)
- self.assertEquals(0, exit_code)
- self.assertEquals([[]], self.merge_test_results_args)
+ self.assertEqual(0, exit_code)
+ self.assertEqual([[]], self.merge_test_results_args)
class CommandLineTest(common_merge_script_tests.CommandLineTest):
diff --git a/testing/pylintrc b/testing/pylintrc
index f2dd127..ae028b0 100644
--- a/testing/pylintrc
+++ b/testing/pylintrc
@@ -22,28 +22,20 @@
# wrong-import-position: We often need to add directories to PATH before
# importing, which causes this check to fail.
disable=abstract-method,
- anomalous-backslash-in-string,
bad-indentation,
broad-except,
- consider-using-enumerate,
- deprecated-method,
- deprecated-module,
duplicate-code,
fixme,
- function-redefined,
- global-statement,
import-error,
invalid-name,
invalid-string-quote,
invalid-triple-quote,
locally-disabled,
locally-enabled,
- logging-not-lazy,
missing-docstring,
no-member,
no-self-use,
protected-access,
- reimported,
too-few-public-methods,
too-many-arguments,
too-many-branches,
@@ -247,7 +239,6 @@
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=regsub,
- string,
TERMIOS,
Bastion,
rexec
diff --git a/testing/scripts/rust/PRESUBMIT.py b/testing/scripts/rust/PRESUBMIT.py
index 70c04b9..ee1bbd7 100644
--- a/testing/scripts/rust/PRESUBMIT.py
+++ b/testing/scripts/rust/PRESUBMIT.py
@@ -18,7 +18,7 @@
input_api,
output_api,
this_dir,
- files_to_check=['.*unittest.*\.py$'],
+ files_to_check=[r'.*unittest.*\.py$'],
env=None))
return results
diff --git a/testing/scripts/rust/test_filtering.py b/testing/scripts/rust/test_filtering.py
index 38b3e75..b054e77 100644
--- a/testing/scripts/rust/test_filtering.py
+++ b/testing/scripts/rust/test_filtering.py
@@ -175,9 +175,9 @@
assert shard_index < total_shards
result = []
- for i in range(len(list_of_test_names)):
+ for i, test_name in enumerate(list_of_test_names):
if (i % total_shards) == shard_index:
- result.append(list_of_test_names[i])
+ result.append(test_name)
return result
diff --git a/testing/trigger_scripts/base_test_triggerer.py b/testing/trigger_scripts/base_test_triggerer.py
index debe11d6..ec6631e6 100755
--- a/testing/trigger_scripts/base_test_triggerer.py
+++ b/testing/trigger_scripts/base_test_triggerer.py
@@ -202,8 +202,8 @@
return json.load(f)
def remove_swarming_dimension(self, args, dimension):
- for i in range(len(args)):
- if args[i] == '--dimension' and args[i + 1] == dimension:
+ for i, argument in enumerate(args):
+ if argument == '--dimension' and args[i + 1] == dimension:
return args[:i] + args[i + 3:]
return args
diff --git a/testing/trigger_scripts/base_test_triggerer_unittest.py b/testing/trigger_scripts/base_test_triggerer_unittest.py
index c566460..da8291d 100755
--- a/testing/trigger_scripts/base_test_triggerer_unittest.py
+++ b/testing/trigger_scripts/base_test_triggerer_unittest.py
@@ -33,7 +33,7 @@
@unittest.skipUnless('TRIGGER_SCRIPT_INTEGRATION_TESTS' in os.environ,
'this is quick check test using real swarming server')
- def test_list_bots(self):
- # This just checks list_bots runs without error.
+ def test_list_tasks(self):
+ # This just checks list_tasks runs without error.
triggerer = base_test_triggerer.BaseTestTriggerer()
with fake_filesystem_unittest.Pause(self):
diff --git a/testing/trigger_scripts/perf_device_trigger.py b/testing/trigger_scripts/perf_device_trigger.py
index d4522ac..2900078 100755
--- a/testing/trigger_scripts/perf_device_trigger.py
+++ b/testing/trigger_scripts/perf_device_trigger.py
@@ -362,8 +362,8 @@
# pylint: disable=inconsistent-return-statements
def _get_swarming_server(self, args):
- for i in range(len(args)):
- if '--swarming' in args[i]:
+ for i, argument in enumerate(args):
+ if '--swarming' in argument:
server = args[i + 1]
slashes_index = server.index('//') + 2
# Strip out the protocol
diff --git a/testing/trigger_scripts/perf_device_trigger_unittest.py b/testing/trigger_scripts/perf_device_trigger_unittest.py
index ed131e7..3f21ab7 100755
--- a/testing/trigger_scripts/perf_device_trigger_unittest.py
+++ b/testing/trigger_scripts/perf_device_trigger_unittest.py
@@ -195,13 +195,13 @@
alive_bots=['build3', 'build4', 'build5'],
dead_bots=['build1', 'build2'])
expected_task_assignment = self.get_triggered_shard_to_bot(triggerer)
- self.assertEquals(len(set(expected_task_assignment.values())), 3)
+ self.assertEqual(len(set(expected_task_assignment.values())), 3)
# All three bots were healthy so we should expect the task assignment to
# stay the same
- self.assertEquals(expected_task_assignment.get(0), 'build3')
- self.assertEquals(expected_task_assignment.get(1), 'build4')
- self.assertEquals(expected_task_assignment.get(2), 'build5')
+ self.assertEqual(expected_task_assignment.get(0), 'build3')
+ self.assertEqual(expected_task_assignment.get(1), 'build4')
+ self.assertEqual(expected_task_assignment.get(2), 'build5')
def test_no_bot_returned(self):
with self.assertRaises(ValueError) as context:
@@ -223,13 +223,13 @@
alive_bots=['build3', 'build4', 'build5'],
dead_bots=['build1', 'build2'])
expected_task_assignment = self.get_triggered_shard_to_bot(triggerer)
- self.assertEquals(len(set(expected_task_assignment.values())), 3)
+ self.assertEqual(len(set(expected_task_assignment.values())), 3)
# The first two should be assigned to one of the unassigned healthy bots
new_healthy_bots = ['build4', 'build5']
self.assertIn(expected_task_assignment.get(0), new_healthy_bots)
self.assertIn(expected_task_assignment.get(1), new_healthy_bots)
- self.assertEquals(expected_task_assignment.get(2), 'build3')
+ self.assertEqual(expected_task_assignment.get(2), 'build3')
def test_not_enough_healthy_bots(self):
triggerer = self.setup_and_trigger(
@@ -243,17 +243,17 @@
alive_bots=['build3', 'build4', 'build5'],
dead_bots=['build1', 'build2'])
expected_task_assignment = self.get_triggered_shard_to_bot(triggerer)
- self.assertEquals(len(set(expected_task_assignment.values())), 5)
+ self.assertEqual(len(set(expected_task_assignment.values())), 5)
# We have 5 shards and 5 bots that ran them, but two
# are now dead and there aren't any other healthy bots
# to swap out to. Make sure they still assign to the
# same shards.
- self.assertEquals(expected_task_assignment.get(0), 'build1')
- self.assertEquals(expected_task_assignment.get(1), 'build2')
- self.assertEquals(expected_task_assignment.get(2), 'build3')
- self.assertEquals(expected_task_assignment.get(3), 'build4')
- self.assertEquals(expected_task_assignment.get(4), 'build5')
+ self.assertEqual(expected_task_assignment.get(0), 'build1')
+ self.assertEqual(expected_task_assignment.get(1), 'build2')
+ self.assertEqual(expected_task_assignment.get(2), 'build3')
+ self.assertEqual(expected_task_assignment.get(3), 'build4')
+ self.assertEqual(expected_task_assignment.get(4), 'build5')
def test_not_enough_healthy_bots_shard_not_seen(self):
triggerer = self.setup_and_trigger(
@@ -267,18 +267,18 @@
alive_bots=['build3', 'build4', 'build5'],
dead_bots=['build1', 'build2'])
expected_task_assignment = self.get_triggered_shard_to_bot(triggerer)
- self.assertEquals(len(set(expected_task_assignment.values())), 5)
+ self.assertEqual(len(set(expected_task_assignment.values())), 5)
# Not enough healthy bots so make sure shard 0 is still assigned to its
# same dead bot.
- self.assertEquals(expected_task_assignment.get(0), 'build1')
+ self.assertEqual(expected_task_assignment.get(0), 'build1')
# Shard 1 had not been triggered yet, but there weren't enough
# healthy bots. Make sure it got assigned to the other dead bot.
- self.assertEquals(expected_task_assignment.get(1), 'build2')
+ self.assertEqual(expected_task_assignment.get(1), 'build2')
# The rest of the assignments should stay the same.
- self.assertEquals(expected_task_assignment.get(2), 'build3')
- self.assertEquals(expected_task_assignment.get(3), 'build4')
- self.assertEquals(expected_task_assignment.get(4), 'build5')
+ self.assertEqual(expected_task_assignment.get(2), 'build3')
+ self.assertEqual(expected_task_assignment.get(3), 'build4')
+ self.assertEqual(expected_task_assignment.get(4), 'build5')
def test_shards_not_triggered_yet(self):
# First time this configuration has been seen. Choose three
@@ -292,7 +292,7 @@
alive_bots=['build3', 'build4', 'build5'],
dead_bots=['build1', 'build2'])
expected_task_assignment = self.get_triggered_shard_to_bot(triggerer)
- self.assertEquals(len(set(expected_task_assignment.values())), 3)
+ self.assertEqual(len(set(expected_task_assignment.values())), 3)
new_healthy_bots = ['build3', 'build4', 'build5']
self.assertIn(expected_task_assignment.get(0), new_healthy_bots)
self.assertIn(expected_task_assignment.get(1), new_healthy_bots)
@@ -313,8 +313,8 @@
# Test that the new assignment will add a new bot to avoid
# assigning 'build3' to both shard 0 & shard 1 as before.
# It also replaces the dead 'build6' bot.
- self.assertEquals(set(expected_task_assignment.values()),
- {'build3', 'build4', 'build5', 'build7'})
+ self.assertEqual(set(expected_task_assignment.values()),
+ {'build3', 'build4', 'build5', 'build7'})
def test_dynamic_sharding(self):
triggerer = self.setup_and_trigger(
@@ -329,8 +329,8 @@
use_dynamic_shards=True)
expected_task_assignment = self.get_triggered_shard_to_bot(triggerer)
- self.assertEquals(set(expected_task_assignment.values()),
- {'build1', 'build2', 'build3', 'build4', 'build5'})
+ self.assertEqual(set(expected_task_assignment.values()),
+ {'build1', 'build2', 'build3', 'build4', 'build5'})
def test_dynamic_sharding_with_dead_bots(self):
triggerer = self.setup_and_trigger(
@@ -345,8 +345,8 @@
use_dynamic_shards=True)
expected_task_assignment = self.get_triggered_shard_to_bot(triggerer)
- self.assertEquals(set(expected_task_assignment.values()),
- {'build2', 'build3', 'build5'})
+ self.assertEqual(set(expected_task_assignment.values()),
+ {'build2', 'build3', 'build5'})
if __name__ == '__main__':
diff --git a/testing/unexpected_passes_common/builders.py b/testing/unexpected_passes_common/builders.py
index d4b22f6..b5bb944b 100644
--- a/testing/unexpected_passes_common/builders.py
+++ b/testing/unexpected_passes_common/builders.py
@@ -35,6 +35,8 @@
FakeBuildersDict = Dict[data_types.BuilderEntry, Set[data_types.BuilderEntry]]
+# TODO(crbug.com/358591565): Refactor this to remove the need for global
+# statements.
_registered_instance = None
@@ -43,14 +45,14 @@
def RegisterInstance(instance: 'Builders') -> None:
- global _registered_instance
+ global _registered_instance # pylint: disable=global-statement
assert _registered_instance is None
assert isinstance(instance, Builders)
_registered_instance = instance
def ClearInstance() -> None:
- global _registered_instance
+ global _registered_instance # pylint: disable=global-statement
_registered_instance = None
diff --git a/testing/unexpected_passes_common/data_types.py b/testing/unexpected_passes_common/data_types.py
index 61b4711..5cae5d88 100644
--- a/testing/unexpected_passes_common/data_types.py
+++ b/testing/unexpected_passes_common/data_types.py
@@ -32,27 +32,29 @@
ResultSetType = Set['BaseResult']
+# TODO(crbug.com/358591565): Refactor this to remove the need for global
+# statements.
def SetExpectationImplementation(impl: Type['BaseExpectation']) -> None:
- global Expectation
+ global Expectation # pylint: disable=global-statement
assert issubclass(impl, BaseExpectation)
Expectation = impl
def SetResultImplementation(impl: Type['BaseResult']) -> None:
- global Result
+ global Result # pylint: disable=global-statement
assert issubclass(impl, BaseResult)
Result = impl
def SetBuildStatsImplementation(impl: Type['BaseBuildStats']) -> None:
- global BuildStats
+ global BuildStats # pylint: disable=global-statement
assert issubclass(impl, BaseBuildStats)
BuildStats = impl
def SetTestExpectationMapImplementation(impl: Type['BaseTestExpectationMap']
) -> None:
- global TestExpectationMap
+ global TestExpectationMap # pylint: disable=global-statement
assert issubclass(impl, BaseTestExpectationMap)
TestExpectationMap = impl
diff --git a/testing/unexpected_passes_common/expectations.py b/testing/unexpected_passes_common/expectations.py
index 8a939a14..8bacba73 100644
--- a/testing/unexpected_passes_common/expectations.py
+++ b/testing/unexpected_passes_common/expectations.py
@@ -124,6 +124,8 @@
# pylint: disable=useless-object-inheritance
+# TODO(crbug.com/358591565): Refactor this to remove the need for global
+# statements.
_registered_instance = None
@@ -132,14 +134,14 @@
def RegisterInstance(instance: 'Expectations') -> None:
- global _registered_instance
+ global _registered_instance # pylint: disable=global-statement
assert _registered_instance is None
assert isinstance(instance, Expectations)
_registered_instance = instance
def ClearInstance() -> None:
- global _registered_instance
+ global _registered_instance # pylint: disable=global-statement
_registered_instance = None
diff --git a/testing/unexpected_passes_common/queries_unittest.py b/testing/unexpected_passes_common/queries_unittest.py
index 390d813..c757b7454 100755
--- a/testing/unexpected_passes_common/queries_unittest.py
+++ b/testing/unexpected_passes_common/queries_unittest.py
@@ -37,10 +37,10 @@
with self.assertRaises(AssertionError):
uu.CreateGenericQuerier(num_samples=-1)
- def testInvalidNumSamples(self):
- """Tests that the number of samples is validated."""
- with self.assertRaises(AssertionError):
- uu.CreateGenericQuerier(num_samples=-1)
+ def testDefaultSamples(self):
+ """Tests that a default sample count is used when zero is passed."""
+ querier = uu.CreateGenericQuerier(num_samples=0)
+ self.assertGreater(querier._num_samples, 0)
class GetBuilderGroupedQueryResultsUnittest(unittest.TestCase):
diff --git a/testing/xvfb.py b/testing/xvfb.py
index 789dc749..bcc72e59 100755
--- a/testing/xvfb.py
+++ b/testing/xvfb.py
@@ -192,7 +192,7 @@
specified width, height and refresh rate.
See: https://www.x.org/archive/X11R7.0/doc/html/chips4.html"""
re_matches = _re_search_command(
- 'Modeline "(.*)"\s+(.*)',
+ r'Modeline "(.*)"\s+(.*)',
['cvt', str(width), str(height),
str(refresh)],
)
@@ -284,8 +284,8 @@
call_xrandr(['--addmode', output_name, modeline_label])
(default_mode_label, _) = _make_xorg_modeline(*default_size, refresh_rate)
# Set the mode of all monitors to connect and activate them.
- for i in range(0, len(output_names)):
- args = ['--output', output_names[i], '--mode', default_mode_label]
+ for i, name in enumerate(output_names):
+ args = ['--output', name, '--mode', default_mode_label]
if i > 0:
args += ['--right-of', output_names[i - 1]]
call_xrandr(args)